Dataset schema (column name, dtype, observed length/range or number of classes):

  id                       stringlengths   10 to 10
  title                    stringlengths   3 to 179
  track                    stringclasses   1 value
  status                   stringclasses   3 values
  keywords                 stringlengths   2 to 2.39k
  primary_area             stringclasses   21 values
  author                   stringclasses   501 values
  authorids                stringclasses   501 values
  aff                      stringclasses   1 value
  aff_domain               stringclasses   1 value
  position                 stringclasses   1 value
  rating                   stringclasses   355 values
  confidence               stringlengths   0 to 19
  soundness                stringclasses   642 values
  contribution             stringclasses   596 values
  presentation             stringclasses   782 values
  rating_avg               float64         0 to 9
  confidence_avg           float64         0 to 5
  soundness_avg            float64         0 to 4
  contribution_avg         float64         0 to 4
  presentation_avg         float64         0 to 4
  corr_rating_confidence   float64         -1 to 1
  project                  stringclasses   1 value
  github                   stringclasses   1 value
  Review                   listlengths     2 to 10
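If this dump comes from a Hugging Face dataset viewer, one way to load and inspect a row is sketched below. The repository id "org/iclr2025-openreview" and the "train" split name are placeholders (assumptions), since neither is named in this card.

```python
# Minimal sketch, assuming the dataset is hosted on the Hugging Face Hub.
# The repo id and split name below are hypothetical placeholders.
from datasets import load_dataset

ds = load_dataset("org/iclr2025-openreview", split="train")  # hypothetical repo id

row = ds[0]
print(row["id"], row["title"])  # e.g. 4GcZSTqlkr, "Tokenizer-Agnostic Transferable Attacks ..."
print(row["rating"])            # semicolon-separated reviewer scores, e.g. "3;5;5;5"
print(len(row["Review"]))       # per-paper list of review dicts (2 to 10 entries)
```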
id: 4GcZSTqlkr
title: Tokenizer-Agnostic Transferable Attacks on Language Models for Enhanced Red Teaming
track: main
status: Active
keywords: Adversarial Attacks;Red Teaming;Transferable Attacks;AI Safety;Large Language Models
primary_area: alignment, fairness, safety, privacy, and societal considerations
rating: 3;5;5;5
confidence: 4;4;4;3
soundness: 2;3;3;3
contribution: 2;3;2;2
presentation: 2;3;2;3
rating_avg: 4.5
confidence_avg: 3.75
soundness_avg: 2.75
contribution_avg: 2.25
presentation_avg: 2.5
corr_rating_confidence: -0.333333
Review:
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "The approach’s transparency in generating adversarial examples for LLMs raises ethical concerns, particularly around misuse, and would benefit from a clearer discussion of safeguards to prevent potential exploitation." }, "flag_for_ethics_review": { "value": [ "Yes, Potentially harmful insights, methodologies and applications" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See Weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- TIARA presents a tokenizer-independent framework for transferable adversarial attacks, which effectively bypasses constraints associated with fixed tokenization schemes and gradient access, making it adaptable across diverse LLM architectures.\n- TIARA employs a multi-stage candidate selection process that iteratively refines adversarial candidates based on both validation and test metrics, maximizing attack success rates across multiple target models.\n- TIARA offers a systematic analysis of adversarial strategies, identifying categories such as formatting manipulation and instruction/context manipulation, which advance understanding of adversarial patterns that exploit LLM vulnerabilities." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces the Tokenizer-Independent Adversarial Red-teaming Approach (TIARA ), a framework designed to improve AI safety by automating the red teaming of large language models (LLMs). TIARA allows for the generation of transferable adversarial examples without the need for gradient access or fixed tokenizers, facilitating effective attacks across diverse model architectures. Its contributions include a tokenizer-agnostic method for generating adversarial inputs, a gradient-free optimization technique for exploring token-level perturbations, and an automated approach for discovering model vulnerabilities, which aim at identifying potential risks and advancing the development of more secure Chatbot systems." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- TIARA’s multi-stage candidate selection and extensive perturbation sampling require significant computational resources, making it less feasible for deployment in real-time scenarios.\n- While TIARA is gradient-free, it relies on direct access to model logits for loss computation, which may limit applicability to models where even logit-level access is restricted.\n- The paper does not extensively test TIARA against models equipped with advanced, system-level defensive mechanisms, making it unclear how resilient the approach is in more robustly defended environments.\n- TIARA lacks an evaluation against input transformation defences, such as synonym replacement or back-translation, which could easily disrupt the token-level perturbations TIARA relies on, potentially reducing its success rate in adversarial bypass attempts.\n- TIARA’s reliance on token-level perturbations to generate effective adversarial examples may limit its success on models or tasks where semantic-level manipulations are more impactful. This limitation suggests a need for a more flexible perturbation strategy that balances token- and semantic-level alterations." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. I wonder the intuition and design insight.\n2. How to scale to LLM with tokenizer of much larger vocabulary?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "+ An effective attack that outperforms previous jailbreak.\n+ Evaluation is quite thorough" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a novel jailbreak method towards LLM based on the attack transferability between the source LLM and the target LLM to achieve tokenzier-agnostic.\nComparing to previous gradient based attack, the attack does not require gradient to optimize jailbreak prompt.\nInstead, at each step the attack select best perturbed candicate and use multiple LLM to enhance the attack.\nFinally, through experiments, the authors show that the proposed attack outperforms pervious methods and has high attack success rate on commercial LLM like GPT-3.5." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The idea of candicate search has been explored in previous work and the usage of several LLMs to increase the attack transferability is a common technique (which is widely used to increase the adversarial tranferability for image classification models).\nSpecifically, BEAST attack [1] also investigates how to generate jailbreak prompt without using the gradient. 
Their beam search-alike method is similar to the TIARA attack here because it essentially selects the potentially best candidates to the target LLM. The differences I see include: TIARA uses multiple source LLM, multiple loss (teacher forcing and autoregressive) and disconnect the tokenizer and source LLM. However, these improvements are not milestone techniques in further improvement jailbreak attack.\n\n- Some design choices, intuition and attack insight, are not clear. For example, how to design the teacher-forcing loss and autoregressive loss? why choose these two loss functions? The use of multiple source LLM can increase the jailbreak attack success rate, but what are the potential reason behind it? There is neither qualitative nor quatitative analysis.\n\n- The tokenizer-agnostic is somehow overclaimed and limited when the target LLM has larger vocabulary. As show in Figure 2, the GPT-4 has lowest ASR comparing to the rest. As the api's tiktoken shows, GPT-4's vocabulary size is much larger than existing opensoured LLMs (which are similar to or better than the GPT-3.5). If future LLM (that should be red-teamed) has larger vocabulary, TIARA may be limited in this case. In other words, my concern is that TIARA may be only effective in red-teaming existing LLMs.\n\n[1] Fast Adversarial Attacks on Language Models In One GPU Minute. ICML 2024." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- What was the typical number of iterations for an attack success to be found? Was there a relationship between the longer the optimization and the higher attack transferability?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The method is empirically effective, simple to implement, and investigates a realistic threat model. Furthermore, with the current perturbation function in the paper we can see that enough random perturbations are sufficient to eventually converge to a jailbreak. I think this is an interesting find that jailbreaks can essentially be brute forced in such a manner on a large scale with sufficient (though not unattainable) compute resources. \n\n- Using the loss function proposed in the paper combined with an evolutionary search strategy is interesting, and is a different approach compared to popular methods using LLM as a judge to guide evolutionary based strategies (e.g. TAP/PAIR) - potentially this indicates that powerful LLMs as fitness functions in this style of attack may not be needed." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes an algorithm for generating attacks that are highly transferable between open source models and closed source target models. 
With gradients not being computable on the closed source models (and potentially query numbers being limited), relying on transfer attacks can be a realistic and practical attack method." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- It would have been useful too compare with different perturbation functions: the paper shows a random approach, but as highlighted in the work it could be any function. Indeed the random perturbation function is very computationally expensive - 1024 pertubrations per iterations, up with to 1k iterations: in other words, it could be over 1 million separate model calls. Further, additional perturbation functions could highlight if the proposed loss is the most effective under different perturbation strategies.\n\n- The paper flow and balance could be improved: for example, the description of the Multi-Stage Candidate Selection which is a key competent of the algorithm (and is listed as a key contribution of the paper) is in the appendix, and could use a clearer explanation of the steps. \n\n- It is unclear why Llama3 was used as a transfer model in combination with others, but not used either stand alone, or not evaluated against direct attacks in Table 2." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "N/A" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. **Excellent transfer attack performance.** The reported results are inspiring, especially those against GPT models.\n2. **The practicality of the method.** Although an ensemble of multiple models is quite an old talk for universal adversarial attack, it is arguably the most feasible method." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper explores transfer attacks against LLMs. Unlike previous works that utilize gradient to optimize a suitable prompt, this work employs a perturbation-based strategy. The method employs additional models as oracle models and judges the fitness of the current adversarial prompt with the weighted scores of multiple source models. The scoring is based on two loss functions, in which teacher-forcing loss ensures the completion of the target prompt and the auto-regressive loss avoids the occurrence of the deviant tokens. The attack performance is demonstrated via comprehensive experiments. The attacks against GPT models is impressive." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. **Inadequate exposure of method details and experiment details.** What about the setting of temperature and top_k for both the auto-regressive loss and evaluation? What about the choice of the subset of candidate tokens that will be sampled mentioned in line 262. 
After reading the paper, I am still not sure whether the perturbation tokenizer corresponds to the tokenizer of the source model or the one of the target model.\n2. **Ad-hoc attack configuration.** The choice of source models is ad-hoc and will impact the attack performance a lot, as mentioned in Section 4.1. Experiments about the lower-bound and upper-bound performance are in demand, corresponding to the weakest source model and the strongest source model.\n3. **The analysis of tokenizer behaviors is not in-depth enough.** Although different models use distinct tokenizers, it does not mean they exhibit totally different behaviors. For example, for the same tokenization algorithm, a larger vocabulary of tokenizer A may contain the smaller vocabulary of tokenizer B. Analyzing the overlap between different tokenizer behaviors, especially on the crafted prompts, will help us understand whether the transfer attack leverage semantics or just token combinations to make sense.\n4. **Exploration of Attack stability.** As demonstrated in Table 2, the TIARA yields an almost 100\\% ASR. As TIARA is in essence an evolutionary method, the stability of the success is amazing. Can the authors give more details about the evolutionary trajectories and analyze the countable failed cases?\n5. **Unclear standpoint.** Although the paper emphasizes the tokenizer-agnostic feature of their attack, it is unclear about the difference between tokenizer-agnostic and model-agnostic due to the superficial exploration of the tokenizer behavior. The authors should add a discussion section that clearly defines and distinguishes between tokenizer-agnostic and model-agnostic approaches, and explicitly positions your method within this framework.\n\nI would like to raise my score if the authors can answer my concerns, especially the one about tokenizer behavior (weakness 3)." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We introduce TIARA, a tokenizer-independent method for generating transferable adversarial attacks on language models for enhanced red teaming." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024tokenizeragnostic,\ntitle={Tokenizer-Agnostic Transferable Attacks on Language Models for Enhanced Red Teaming},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4GcZSTqlkr},\nnote={under review}\n}" }, "abstract": { "value": "Large Language Models (LLMs) have become increasingly prevalent, raising concerns about potential vulnerabilities and misuse. Effective red teaming methods are crucial for improving AI safety, yet current approaches often require access to model internals or rely on specific jailbreak techniques. We present TIARA (Tokenizer-Independent Adversarial Red-teaming Approach), a novel method for automated red teaming of LLMs that advances the state-of-the-art in transferable adversarial attacks. Unlike previous token-level methods, TIARA eliminates constraints on gradient access and fixed tokenizer, enabling simultaneous attacks on multiple models with diverse architectures. By leveraging a combination of teacher-forcing and auto-regressive loss functions with a multi-stage candidate selection procedure, it achieves superior performance without relying on gradient information or dedicated attacker models. 
TIARA attains an 82.9\\% attack success rate on GPT-3.5 Turbo and 51.2\\% on Gemini Pro, surpassing previous transfer and direct attacks on the HarmBench benchmark. We provide insights into adversarial string length effects and present a qualitative analysis of discovered adversarial techniques. This work contributes to AI safety by offering a robust, versatile tool for identifying potential vulnerabilities in LLMs, facilitating the development of safer AI systems." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Adversarial Attacks", "Red Teaming", "Transferable Attacks", "AI Safety", "Large Language Models" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/61cc817d83bfdd1fe57925009f9c91c4ecc765ba.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/a32b4ace946848188da27d0b33504f6cdf3119c3.zip" }, "title": { "value": "Tokenizer-Agnostic Transferable Attacks on Language Models for Enhanced Red Teaming" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
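The aggregate columns of the record above are consistent with simple statistics over the semicolon-separated score strings: the *_avg columns are means, and corr_rating_confidence matches the Pearson correlation between the rating and confidence lists. The sketch below reproduces the stored values for this row; that the dataset was actually built this way is an assumption.

```python
# Reproduces rating_avg = 4.5, confidence_avg = 3.75 and
# corr_rating_confidence = -0.333333 for record 4GcZSTqlkr above.
from statistics import mean, pstdev

def parse_scores(s: str) -> list[float]:
    """Turn a score string like '3;5;5;5' into a list of floats."""
    return [float(x) for x in s.split(";") if x]

def pearson(xs: list[float], ys: list[float]) -> float:
    mx, my = mean(xs), mean(ys)
    cov = mean((x - mx) * (y - my) for x, y in zip(xs, ys))
    sx, sy = pstdev(xs), pstdev(ys)
    # A constant score list gives zero std; returning 0 matches rows such as
    # 4HAXypZfsm below, where confidence is 3;3;3;3 and the stored correlation is 0.
    return 0.0 if sx == 0 or sy == 0 else cov / (sx * sy)

rating = parse_scores("3;5;5;5")
confidence = parse_scores("4;4;4;3")
print(mean(rating), mean(confidence))          # 4.5 3.75
print(round(pearson(rating, confidence), 6))   # -0.333333
```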

id: 4HAXypZfsm
title: Supervised Disentanglement Under Hidden Correlations
track: main
status: Active
keywords: Disentangled representation learning;Supervised representation learning;Mutual information;Causal graph analysis;Hidden Correlations
primary_area: learning theory
rating: 3;5;5;6
confidence: 3;3;3;3
soundness: 2;2;3;3
contribution: 2;2;2;2
presentation: 2;2;2;3
rating_avg: 4.75
confidence_avg: 3
soundness_avg: 2.5
contribution_avg: 2
presentation_avg: 2.25
corr_rating_confidence: 0
Review:
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- From Section 3 and 4, only one attribution mode is considered. For more complex or practical scenarios, all attributions may have their mode variables. Whether the SD-HC can efficiently and effectively address them?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- This paper establishes a novel theory for supervised disentanglement with hidden correlations. It consists of both necessary and sufficient conditions, serving as a significant improvememt for existing works.\n- The experiments are comprehensive and the results are good enough." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a novel supervised disentanglement method, SD-HC, designed to address hidden correlations in multi-modal data, which traditional DRL methods often overlook. SD-HC focuses on preserving essential mode information within attributes by minimizing mode-based conditional mutual information, thus achieving better disentanglement and predictive accuracy under both hidden and attribute correlations. The authors theoretically prove that mode-based conditional mutual information minimization is both necessary and sufficient for effective disentanglement in complex, correlated datasets." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The motivation of the problem setting is not clearly discussed. In Figure 2, to study the hidden correlations among attributions, why not directly study $a_k$ and $a_{-k}$? The role or advantage of introducing the variable $m_k$ is not clear for me.\n- It seems that the performance of SD-HC heavily relies on the mode label estimation. First, in real applications, how to determine the number of modes $N_m$? In addition, for complex data pattern, the data representations are not high-quality and the clutering is not accurate accordingly. In such case, is there any techniques to guarantee good performance?\n- There are some ambiguous concepts in this paper. For example, 1) \"Data mode\". I do not find its concrete definition. This term is not common in machine learning and its meaning varies with contexts. 2) \"Multi-modal\". It seems not to refer to the commonly used meaning, such as text and image. More strict definitions is required to make this paper more clear.\n- The writting needs to be further improved. For example, in line 155, $c^a$ and $c^m$ should be boldface." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See the weakness part." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper studies an important problem in disentangled representation learning by considering hidden correlations, which are prevalent in real-world data.\n2. The experimental evaluation is thorough, including both synthetic and real-world datasets." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses the problem of disentangled representation learning in the presence of hidden correlations, which are common in real-world data. The authors consider a multi-modal data distribution where certain attributes may induce complex modes correlated with other attributes. To tackle this issue, they first investigate the necessary and sufficient conditions for supervised disentanglement under the proposed data-generating process. Subsequently, they introduce the SD-HC method for learning disentangled representations with hidden correlations. The proposed method is validated on both synthetic data and five real-world datasets." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The major issue lies in the lack of detailed theoretical explanation:\n - The causal graph presented in Figure 3 is confusing. Since $z = f(x)$ is a function of $x$, there should be a directed edge from $x$ to $z$. However, the graph shows $z$ as a parent of $x$, which is misleading.\n - The claim that \"mode-based conditional mutual information (CMI) minimization is the necessary and sufficient condition for supervised disentanglement\" lacks a detailed formal list of necessary assumptions. This claim heavily depends on the data-generating process, and the specific assumptions should be clearly highlighted.\n - The link between Proposition 2 and the independence requirement in Definition 2 is unclear. Proposition 2 appears to be a mutual information result that is not directly related to causality. Although the authors attempt to bridge this gap in Lines 242 to 244, a more rigorous analysis with detailed assumptions and proofs is needed.\n2. The mode label estimation step is confusing. In this step, mode labels are estimated by clustering on the corresponding attribute. Consequently, there would be no confounders that affect $m_k$ and $a_{-k}$ while not affecting $a_k$, which violates the assumptions depicted in Figures 2 and 3.\n3. The paper lacks detailed explanations regarding the choice of attributes and the modes in the real-world datasets. Providing more context would help in understanding the experimental results better.\n4. There are typos in Lines 332 and 336. 
\"Figure 7a\" and \"Figure 7b\" should be replaced with \"Figure 5a\" and \"Figure 5b,\" respectively." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please refer to the weaknesses section." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The studied problem is open and important -- latent confounding is prevalent and proper efforts are deserved to this problem.\n2. The experiments are extensive and the proposal looks promising." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a method called Supervised Disentanglement under Hidden Correlations (SD-HC) to improve disentangled representation learning (DRL). Traditional DRL methods struggle with hidden correlations in multi-modal data. SD-HC addresses this by discovering data modes under certain attributes and minimizing mode-based conditional mutual information. This approach ensures disentanglement while preserving mode and attribute information. The paper provides theoretical proofs and empirical evidence showing SD-HC outperforms existing methods on various datasets." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper may benefit from a clear introduction of the task, especially the instantiation with real-world examples. Although I appreciate several examples present in the paper, a coherent running example would aid the readability much more if it includes the objective of the task, the problem of the hidden correlation, and the instantiation of the proposal in this example.\n2. The graphical model Figure 2 is not well-defined for me. $z$ variables are functions of the observed data $x$. How would they appear in the true data-generating process? To me, this is not even a causal graph -- if I intervene on attribute $a_{i}$ in the true data-generating process, this influences will not be blocked by estimated variables $z$'s.\n3. The main arguments are elusive to me. Especially, what does Proposition 2 imply? Its connection to the disentanglement is unclear. To me, it states that if you can find the modality variable $m_{1}$ and condition on it, the associated estimated variable $z_{1}$ will be d-separated from other attributes $a_{-1}$. First, there is no guarantee to discover the true modality variable $m_{1}$ (the proposal is somehow heuristic); second, how would this lead to the disentanglement conclusion?" 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- (minor) Why renaming? The current name SD-HC sounds more like a problem setting, not a specific method." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "(Disclosure: I've reviewed this paper before, and I used a PDF comparison tool to check the differences.)\n\n- This paper studies a challenging problem in disentangled representation learning.\n- The motivating example in Figure 1 is easy to follow and highlights the need for this method. It also links to the experiments on the human activity recognition task in Section 5.\n- The symbols and technical terms are properly defined. The definitions of disentangled data generation process and disentangled representations are clearly stated in Definitions 1 and 2.\n- The paper provided sufficient empirical evidence on both image and time series data." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses disentangled representation learning for data with correlated, multimodal attributes and proposes Supervised Disentanglement under Hidden Correlations (SD-HC), which infers hidden modes within attributes to achieve disentanglement while preserving essential information by minimizing mode-based conditional mutual information (CMI). The proposed approach is validated through theoretical analysis and experiments on illustrative toy data, image data (colored MNIST), and time series datasets (UCI-HAR, RealWorld, HHAR, and MFD), showing improved performance over existing methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The connections between the theory and learning are still unclear to me. A better way to characterize the proposed method is to say something like \"Under x assumptions, given x supervision, when x loss is optimized or x condition is satisfied, the learned representations are disentangled in the sense of definition x\".\n- The propositions and corollaries can be written in a more self-contained manner, even if the symbols have been defined in the text above." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024supervised,\ntitle={Supervised Disentanglement Under Hidden Correlations},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4HAXypZfsm},\nnote={under review}\n}" }, "abstract": { "value": "Disentangled representation learning (DRL) methods are often leveraged to improve the generalization of representations. Recent DRL methods have tried to handle attribute correlations by enforcing conditional independence based on attributes. 
However, the complex multi-modal data distributions and hidden correlations under attributes remain unexplored. Existing methods are theoretically shown to cause the loss of mode information under such hidden correlations. To solve this problem, we propose Supervised Disentanglement under Hidden Correlations (SD-HC), which discovers data modes under certain attributes and minimizes mode-based conditional mutual information to achieve disentanglement. Theoretically, we prove that SD-HC is sufficient for disentanglement under hidden correlations, preserving mode information and attribute information. Empirically, extensive experiments on one toy dataset and five real-world datasets demonstrate improved generalization against the state-of-the-art baselines. Codes are available at anonymous Github https://anonymous.4open.science/r/SD-HC." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Disentangled representation learning", "Supervised representation learning", "Mutual information", "Causal graph analysis", "Hidden Correlations" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/6a30d3a132ddef7f71d9506e3c7ac7382e2b9824.pdf" }, "presentation": null, "primary_area": { "value": "learning theory" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Supervised Disentanglement Under Hidden Correlations" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
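Each Review list mixes per-reviewer entries with a final submission-level entry (abstract, keywords, bibtex), and populated fields are wrapped as {"value": ...} while unused fields are null. A minimal sketch for flattening the reviewer scores and free text, assuming only the keys visible in the rows above, follows.

```python
# Hedged sketch: field names are taken from the Review JSON shown above; everything
# else (missing-key handling, skipping the submission entry) is an assumption.
def unwrap(field):
    """Return the inner value of a {'value': ...} wrapper, or None if the field is null."""
    return field["value"] if isinstance(field, dict) else None

def flatten_reviews(row: dict) -> list[dict]:
    flat = []
    for rev in row["Review"]:
        if unwrap(rev.get("rating")) is None:
            continue  # skip the submission-level entry, which carries no rating
        flat.append({
            "rating": unwrap(rev.get("rating")),
            "confidence": unwrap(rev.get("confidence")),
            "soundness": unwrap(rev.get("soundness")),
            "contribution": unwrap(rev.get("contribution")),
            "presentation": unwrap(rev.get("presentation")),
            "summary": unwrap(rev.get("summary")),
            "strengths": unwrap(rev.get("strengths")),
            "weaknesses": unwrap(rev.get("weaknesses")),
        })
    return flat
```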

id: 4HL2aiDV97
title: GAD-VLP: Geometric Adversarial Detection for Vision-Language Pre-Trained Models
track: main
status: Active
keywords: Adversarial detection;Geometric Distance;Multimodal models
primary_area: alignment, fairness, safety, privacy, and societal considerations
rating: 3;3;3;5
confidence: 5;3;5;4
soundness: 4;2;2;2
contribution: 2;1;2;2
presentation: 4;3;3;3
rating_avg: 3.5
confidence_avg: 4.25
soundness_avg: 2.5
contribution_avg: 1.75
presentation_avg: 3.25
corr_rating_confidence: -0.174078
Review:
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "I think it'd be important to test adaptive attacks, especially considering that the same techniques used in this paper have been shown ineffective with standard classifiers." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The paper extends the use of several detection techniques to vision-language models, which might be interesting as making these models robust is a relevant challenge. The question of whether detection in multi-modal models is easier than with classifier is also interesting.\n\n- The experimental evaluation includes several datasets and tasks, and different architectures." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper benchmarks the effectiveness of several methods for detecting adversarial input images in multi-modal models. In particular, these techniques exploit the geometry of the features extracted by the vision and multi-modal encoders to distinguish between clean and adversarial images, and are agnostic of the downstream task. In the experimental evaluation, the different methods are compared on several datasets for both classification and retrieval tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Detection methods for image classification have often been shown ineffective against adaptive attacks: for example, LID was bypassed in [A], Mahalanobis distance was shown non-robust to adversarial perturbations in [B], and several detection methods are bypassed in [C]. Thus, the proposed methods should be tested against attacks targeting the detection mechanism, e.g. as discussed in [C]. Moreover, the ablation study in Sec. 5.3 about generalization to different attacks only uses methods which are very close to the PGD attack (with only 10 steps) used for tuning the detection methods, and optimize the same loss function: then the generalization to them is not surprising.\n\n- The techniques used for detection are from prior works, which limits the technical novelty of the paper.\n\n- I think it'd be useful to report the success rate of the attack before detection.\n\n[A] https://arxiv.org/abs/1802.00420 \\\n[B] https://arxiv.org/abs/2201.07012 \\\n[C] https://openreview.net/forum?id=af1eUDdUVz" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "This paper should consider further feedback regarding the lack of rigorous mathematical proof, computational scalability concerns, and vulnerability to adaptive attacks." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper is easy to follow, and the structure is well-claimed." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes GAD-VLP, a geometric adversarial detection framework for vision-language pre-trained models. The method leverages geometric approaches including local intrinsic dimensionality, k-nearest neighbors distance, Mahalanobis distance, and kernel density estimation to identify adversarial examples in VLPs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper lacks rigorous theoretical analysis of why geometric methods work better for VLPs compared to traditional models. While empirical results are shown, there's no formal proof or theoretical guarantees about the method's effectiveness, especially claiming why their approach is fundamentally sound for VLPs.\n\n2. The paper doesn't adequately address the computational costs of calculating geometric metrics, especially for large-scale deployments. Computing k-NN, Mahalanobis distances, and KDE for high-dimensional embeddings can be computationally expensive. \n\n3. The paper doesn't consider sophisticated adaptive attacks that might specifically target the geometric detection mechanism. Adversarial methods often adapt to known defense mechanisms, and the lack of evaluation against such adaptive attacks raises questions about the method's robustness in real-world scenarios. \n\n4. The authors don't thoroughly examine the false positive rates and their impact on model usability. In real-world applications, high false positive rates could lead to unnecessary rejection of legitimate inputs." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- What is the setup for retrieval is it top-1, top-5? \nNo other questions, some questions on choice of experiments can be inferred from Weaknesses section." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The problem being studied is important - Detecting adversarial inputs in multi-modal setup.\n- The related work exploration seems sufficient.\n- Diversity of evaluation tasks (classification, retrieval) and models is reasonable" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper looks at detection of adversarial examples for multi-modal models (CLIP, ALBEF). They extend uni-modal detection methods to vision-language based models. Experiments are conducted on classification and Image-retrieval tasks using different 'geometry of sample' based approaches to show GAD-VLP (proposed framework) works well when using different methods for detection." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Overall the paper lacks technical novelty as previously used methods (MMD, KDE etc) for uni-modal detection methods are just transferred to multi-modal setup.\n\n- The evaluation/testing setup in re to adversarial setup is not sufficient.\n\n- The adversarial attacks do not seem strong, looking at Fig. 2 a naturally strong attack would be to add a regularizer term that enforces the image embedding (under perturbation) to stay close to the clean embedding. Testing method on such strong attacks would be nice.\n\n- The attacks are based on10-step PGD, and versions of FGSM all at eps=8/255 (other values should have been looked at). A lot of new attacks (see the ones in [1, 2]) for CLIP like models have been proposed - testing which would have been also a valuable addition to the submission.\n\n- For the binary classifier using LID, attacking the classifier (detector) would also be a reasonably strong attack.\n\n[1] Schlarmann, Christian, and Matthias Hein. \"On the adversarial robustness of multi-modal foundation models.\" Proceedings of the IEEE/CVF International Conference on Computer Vision. 2023.\n\n[2] Mao, Chengzhi, et al. \"Understanding Zero-shot Adversarial Robustness for Large-Scale Models.\" The Eleventh International Conference on Learning Representations." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "Please see the weakness." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "1.\tThe writing is clear. The formulas are correct.\n2.\tThe experiment is multi-dimensional.\n3.\tThe research topic is important for VLP." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper propose the GAD-VLP, a method for detecting adversarial attacks in VLP Models. 
It uses geometric metrics like Local Intrinsic Dimensionality (LID), k-Nearest Neighbors (k-NN), Mahalanobis distance, and Kernel Density Estimation (KDE) to distinguish adversarial examples. GAD-VLP works across various VLP architectures, whether they use separate or combined embeddings for vision and language inputs. The method demonstrates high detection accuracy against different attacks and is applicable to tasks like zero-shot classification and image-text retrieval. The study highlights GAD-VLP's robustness and versatility across multiple domains." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.\tWhile the proposed method is effective, the metrics used are standard and not specifically tailored for the Vision-Language Pre-training (VLP) task.\n2.\tThe paper does not sufficiently highlight the distinctions between unimodal models and VLP models, resulting in a lack of justification for the choice of metrics.\n3.\tClassical VLP models, such as BLIP [1] and X-VLM [2], are missing from the analysis.\n4.\tThe adversarial attack methods utilized are limited and do not include more popular approaches like Set-level Guidance Attack (SGA) [3].\n\n[1] BLIP: Bootstrapping Language-Image Pre-training for Unified Vision-Language Understanding and Generation\n\n[2] Multi-Grained Vision Language Pre-Training: Aligning Texts with Visual Concepts\n\n[3] Set-level Guidance Attack: Boosting Adversarial Transferability of Vision-Language Pre-training Models" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024gadvlp,\ntitle={{GAD}-{VLP}: Geometric Adversarial Detection for Vision-Language Pre-Trained Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4HL2aiDV97},\nnote={under review}\n}" }, "abstract": { "value": "Vision-language pre-trained models (VLPs) have been deployed in numerous real-world applications; however, these models are vulnerable to adversarial attacks. Existing adversarial detection methods have shown their efficacy in single-modality settings (either vision or language), while their performance on VLPs, as multimodal models, remains uncertain. In this work, we propose a novel aspect of adversarial detection called GAD-VLP, which detects adversarial examples by exploiting vision and joint vision-language embeddings within VLP architectures. We leverage the geometry of the embedding space and demonstrate the unique characteristics of adversarial regions within these models. We explore the embedding space of the vision modality or the combined vision-language modalities, depending on the type of VLP, to identify adversarial examples. Some of the geometric methods do not require explicit knowledge of the adversary's targets in downstream tasks (e.g., zero-shot classification or image-text retrieval), offering a model-agnostic detection framework applicable across VLPs. Despite its simplicity, we demonstrate that these methods deliver a nearly perfect detection rate on state-of-the-art adversarial attacks against VLPs, including both separate and combined attacks on the vision and joint modalities." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Adversarial detection", "Geometric Distance", "Multimodal models" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/877092dc5c513fa23e4f52d99216fea29577425c.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "GAD-VLP: Geometric Adversarial Detection for Vision-Language Pre-Trained Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]

id: 4HNfKrGlSJ
title: Hindsight Preference Learning for Offline Preference-based Reinforcement Learning
track: main
status: Active
keywords: Offline Reinforcement Learning;Preference-based Reinforcement Learning;Preference Model
primary_area: reinforcement learning
rating: 3;5;5;5;8
confidence: 3;4;3;3;4
soundness: 2;2;3;3;4
contribution: 2;3;2;2;3
presentation: 3;3;2;4;4
rating_avg: 5.2
confidence_avg: 3.4
soundness_avg: 2.8
contribution_avg: 2.4
presentation_avg: 3.2
corr_rating_confidence: 0.663403
Review:
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "- Why do the segment encoder q, decoder p, and prior f all use the same notation, θ, for their parameters?\n- How does the distribution mismatch between Dp and Du cause problems, and how does HPL address this issue (through improved credit assignment) in the main benchmark experiments? Is there any analysis on these tasks other than the example of gambling MDP?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper introduces a framework for hindsight preference learning, with a practical implementation using a VAE architecture.\n- I think using the example of a gambling MDP greatly helps in understanding the motivation for using hindsight preference learning.\n- Experimental results support the effectiveness of HPL." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a new preference learning method that utilizes hindsight information. By incorporating future states into the reward through a VAE structure, HPL can generate more robust rewards and overcome the limitations of Markovian rewards." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Although I understand the benefits of hindsight preference learning over Markovian rewards, the authors do not clearly position their paper within the broader research on non-Markovian rewards. As mentioned in the related work, both the Preference Transformer and Hindsight PRIOR assume non-Markovian rewards and consider the full trajectory when calculating rewards. Given this, why does the paper primarily motivate its approach as an improvement over Markovian rewards, despite the existence of several approaches that do not rely on this assumption?\n- For example, what is the fundamental difference between this paper and the Hindsight PRIOR paper, as Hindsight PRIOR considers “the importance of a state in a trajectory given that a future state was reached” [1]? Please clarify the novelty of this work in comparison to other hindsight-based PbRL papers.\n- The gambling example is very helpful in illustrating the motivation. However, I am concerned that in more stochastic settings, hindsight modeling may introduce noise and add excessive complexity. The ablation study on k, while focusing on a different message, seems to somewhat support this concern.\n\nReferences \n\n[1] Verma, M., & Metcalf, K. (2024). Hindsight PRIORs for Reward Learning from Human Preferences." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "The preference signals on the MetaWorld platform do not actually take into account future information. Why is the algorithm proposed in this paper considered superior?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "**Originality**: The paper studies a very important problem in PbRL: the nature of human annotations, which is inherently more complex than scripted annotations. By proposing a VAE structure with marginalization techniques, this paper a novel and interesting method.\n\n\n**Quality**: Experiments are conducted on existing human-annotated dataset where the proposed method shows greater performance.\n\n\n**Clarity**: The paper is well written with illustrative examples, clear text and figures.\n\n\n**Significance**: The studied problem: the mismatch between annotations and chosen reward models is of very importance and shall be beneficial for the community." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies preference-based reinforcement learning (PbRL). Notably, it proposes to consider human annotations from a holistic perspective; that is the annotations would include the considerations of future outcome instead of in-trajectory information only. Inspired by this mismatch, authors propose to model human preference with hindsight information via a VAE structure, which is further marginalized for rewarding labeling and following RL downstream tasks. Experiments are conducted on annotations from human and scripted teacher and advantageous performance is demonstrated." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The primary concern lies in the **mismatch** between motivation and downstream methods. \n\nThe paper aims to incorporate additional future information when learning the $r_{\\psi}$. However, actually, the $r_{\\psi}$ that takes future information into account should be the Q-function rather than the reward function. Or more rigorously, the $r_{\\psi}$ actually encompasses far more information than reward function, but it is not necessarily a Q-function. When downstream algorithms utilize the $r_{\\psi}$, the $r_{\\psi}$ is considered as reward function, employing a method that does not consider future information." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "In addition to questions about potential weaknesses, I would like to raise a few more questions:\n- When computing the reward using the prior distribution, how does the effect depend on the number of samples? (N=20 in appendix)\n- What about using both preference data and unlabeled data when training the VAE? Wouldn't this improve the representation across multiple points?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The problem to be solved is clearly specified, and although the approach is simple, it effectively addresses aspects that previous papers have overlooked.\n- In the experiments, HPL outperforms the existing SOTA algorithms on most datasets." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors study the problem of offline preference-based RL. One major problem with existing SOTA approaches, as cited by the authors, is the evaluation of trajectory segments from a global perspective, making the obtained reward implicitly dependent on the future part of the segment. The contributions of the paper can be summarized as follows:\n- The authors propose HPL, which models human preferences using a reward function conditioned on the state, action, and future trajectory, referred to as hindsight information.\n- HPL leverages the trajectory distribution of the unlabeled dataset to learn a prior over future outcomes, providing robust and advantageous rewards during labeling." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- I understand that the use of a gambling MDP in Section 3.1 highlights the importance of this study. However, given the very short trajectory length (two steps) in the example, it remains unclear whether hindsight information was effectively applied to achieve accurate credit assignment using future trajectory information.\n- Figure 10 shows that HPL is already 2 to 10 times slower than other models. There seem to be several reasons for this slower speed. \n - Equation 8 requires summing all steps of each trajectory, which likely takes considerable time.\n - A significant amount of time is spent on sampling in Equation 9.\n - If the running time includes VAE modeling time, it is expected to take even longer.\n- The core idea of HPL seems to lie not in using future trajectory to train the model, but rather in the learned VAE that represents the future trajectory. As the authors mentioned, when the future length exceeds a certain threshold, it becomes difficult to accurately represent longer future segments, which lowers HPL's performance. If future information is available, the performance should ideally be more accurate. 
How might this limitation be addressed?\n\nMinor comments:\n\n- typo on line 403: ''$\\hat{P}(s_{good}|s_{1},a_{1})$ and $\\hat{P}(s_{good}|s_{1},a_{1})$\" -> \"$\\hat{P}(s_{good}|s_{1},a_{1})$ and $\\hat{P}(s_{bad}|s_{1},a_{1})$\"" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. The experiments utilize a customized split of the original dataset, using medium-expert quality for the preference dataset and medium quality for the unlabeled dataset. Has there been any evaluation of preference shift on real-world, large-scale datasets?\n2. Equation 9 includes the number of samples N in the reward function. How does N influence performance, and what is the practical range for this parameter?\n3. The concept of utilizing future segments appears similar to graph search and retrieval-based offline RL methods [1,2]. Could HPL achieve comparable results?\n[1] Deyao Zhu, Li Erran Li, and Mohamed Elhoseiny. Value memory graph: A graph-structured world model for offline reinforcement learning. arXiv preprint arXiv:2206.04384, 2022. \n[2] Yin Z H, Abbeel P. Offline Imitation Learning Through Graph Search and Retrieval. arXiv preprint arXiv:2407.15403, 2024." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "1. The paper addresses a critical challenge in offline preference-based reinforcement learning, presenting well-motivated arguments and clearly illustrating the reward modeling issue through compelling examples.\n2. The authors make the insightful observation that human preferences often stem from final outcomes. Their proposed hindsight preference learning leverages future segments to better model these preferences, resulting in a more robust reward function, particularly for datasets with outcome shifts due to multiple modalities or approaches. The integration of conditional VAE makes the algorithm practical for complex tasks like visuomotor policy learning.\n3. The experimental evaluation strongly supports the core ideas, encompassing multiple popular benchmarks and competitive baselines. The comprehensive ablation studies examining future segment length, dataset sizes, and reward ensemble provide valuable insights into HPL's advantages." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces Hindsight Preference Learning (HPL), a novel reward model learning method for offline preference-based reinforcement learning. The authors address the issue of distribution shift between preference-labeled and unlabeled datasets, which can bias the Markovian reward assumption. To tackle this problem, they propose a reward function that is conditioned not only on states and actions but also on future segments. 
To handle the high-dimensional nature of future segments, the authors employ a conditional VAE to transform them into embedding vectors that better integrate with the reward function. The effectiveness of the method is demonstrated through experiments on Gym-MuJoCo, Adroit, and Meta-World benchmarks, comparing against existing preference-based reinforcement learning algorithms. The results show significant improvements under preference distribution shift, while ablation studies validate the effectiveness of both the VAE and hindsight reward function." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The VAE implementation is limited to encoding future segments, whereas it could potentially be extended to data augmentation, prior distribution modeling, and other applications.\n2. While the authors focus on \"preference distribution shift\" between labeled and unlabeled datasets, they assume stability in other distributions. The paper does not adequately address potential shifts in visual appearance, dynamics, and sensor noise - common challenges in real-world datasets." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please see the weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper is clearly written, easy to understand, and provides detailed, well-functioning code.\n2. The proposed VAE method incorporates hindsight future information from trajectories into the reward model, which effectively mitigates the reward learning issue caused by the distributional shift between preference data $D_p$ and offline data $D_u$.\n3. The experimental performance is impressive, and the ablation studies are thorough." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces Hindsight Preference Learning (HPL), a novel method for offline PbRL. It addresses the shortcomings of existing methods that rely on Markovian reward assumptions, by incorporating future trajectory outcomes in reward learning. HPL uses a variational auto-encoder (VAE) to encode future information and improves credit assignment by considering the future consequences of actions. The authors provide extensive empirical results across a variety of tasks (locomotion and manipulation) and demonstrate that HPL outperforms several state-of-the-art PbRL methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The motivation example might be unreasonable: As far as I know, in the same task, $D_u$ is often sampled from $D_p$, so they follow the same marginal distribution. When such an extreme example occurs, it indicates a significant distribution difference between $D_u$ and $D_p$. 
Although Section 5.3 demonstrates the effectiveness of HPL under distributional shift, this example may lack generality.\n2. In fact, this method can be viewed as leveraging the unlabeled dataset to improve the robustness of the reward model. The idea is somewhat similar to previous work [1], which learns the reward model from labeled data and then labels the unlabeled data to enhance offline RL performance. A further theoretical analysis, such as exploring the theoretical performance bounds of learning the hidden dynamics from the unlabeled data in the reward model, would enrich the paper (though this could be very challenging, so it's just a suggestion).\n3. In Table 7, the performance of OPPO and CPL is quite poor. Is this mainly because the preference data $D_p$ is too small?\n4. In Figure 6c, reducing the size of $D_p$ leads to worse performance. Why is that?\n5. The baselines and ablations lack verification of the core idea of the paper: If the authors aim to verify that the performance improvement comes from the richer environment dynamics provided by the unlabeled data, they should compare with an ablation version where the VAE learns future representations from $D_p$ itself rather than from $D_u$, to further clarify the source of the performance gain. Additionally, in Table 1, a bi-directional transformer version of PT could be used to model future information as another baseline for comparison.\n[1] The Provable Benefits of Unsupervised Data Sharing for Offline Reinforcement Learning. ICLR23." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose to model the effect of future to achieve better credit assignment when learning from human preferences." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024hindsight,\ntitle={Hindsight Preference Learning for Offline Preference-based Reinforcement Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4HNfKrGlSJ},\nnote={under review}\n}" }, "abstract": { "value": "Offline preference-based reinforcement learning (RL), which focuses on optimizing policies using human preferences between pairs of trajectory segments selected from an offline dataset, has emerged as a practical avenue for RL applications. Existing works rely on extracting step-wise reward signals from trajectory-wise preference annotations, assuming that preferences correlate with the cumulative Markovian rewards. However, such methods fail to capture the holistic perspective of data annotation: Humans often assess the desirability of a sequence of actions by considering the overall outcome rather than the immediate rewards. To address this challenge, we propose to model human preferences using rewards conditioned on future outcomes of the trajectory segments, i.e. the hindsight information. For downstream RL optimization, the reward of each step is calculated by marginalizing over possible future outcomes, the distribution of which is approximated by a variational auto-encoder trained using the offline dataset. Our proposed method, Hindsight Preference Learning (HPL), can facilitate credit assignment by taking full advantage of vast trajectory data available in massive unlabeled datasets. Comprehensive empirical studies demonstrate the benefits of HPL in delivering robust and advantageous rewards across various domains." 
}, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Offline Reinforcement Learning", "Preference-based Reinforcement Learning", "Preference Model" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/507f77a4fb6dc4609994e8e7fdf79ab6a700f24d.pdf" }, "presentation": null, "primary_area": { "value": "reinforcement learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/f1c7de91e29cde278d50dba428a67c78a15b8e46.zip" }, "title": { "value": "Hindsight Preference Learning for Offline Preference-based Reinforcement Learning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4HRRcqE9SU
ND-SDF: Learning Normal Deflection Fields for High-Fidelity Indoor Reconstruction
main
Active
Normal Deflection Fields;High-Fidelity Indoor Reconstruction
applications to computer vision, audio, language, and other modalities
5;6;6;6
4;5;4;3
2;3;3;3
3;3;2;3
3;3;3;3
5.75
4
2.75
2.75
3
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- In Line 280, is this a typo on $g(\\theta)$? Looks like this is a missing $\\Delta$." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The proposed normal deflection fields, which correct inaccurately estimated normals from normal maps, seem reasonable to me. \n- Table 6 and Figure 5 effectively showcase the contribution of the method's design through comprehensive ablation studies.\n- The experimental results demonstrate that this proposed pipeline achieves state-of-the-art reconstruction quality in indoor scenarios." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a pipeline for multi-view 3D reconstruction of indoor scenes. The pipeline is based on recently popular differentiable volume rendering methods using Signed Distance Functions (SDF), such as VolSDF and NeuS. To enhance reconstruction quality in indoor scenes, particularly in textureless regions, the paper introduces monocular priors for additional supervision, inspired by recent works like MonoSDF and NeuRIS.\n\nA core contribution of this paper is the introduction of a normal deflection field, which helps the model identify and correct inaccuracies in reference normal images estimated from monocular models, such as the Omnidata model. Experimental results show that this deflection field aids in reconstructing fine geometric details, such as thin structures.\n\nAdditionally, the authors introduce techniques to further improve geometry reconstruction quality, including (1) an adaptive normal loss weight and (2) unbiased volume rendering on thin objects.\n\nThe experimental results are promising and include comprehensive ablation studies. Overall, the paper is well-argued and substantiated." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Using unbiased volume rendering for thin structures seems reasonable. However, why not apply this unbiased volume rendering weight uniformly across all regions? The authors mention potential convergence issues with this approach, but it would be helpful if they provided a more in-depth analysis explaining the nature of these convergence issues and offered insights into why they occur." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. would it make sense to consider depth maps together to decide if the normal prior is accurate or not? For example, in most complex geometry areas such as thin structure areas, the depth is not continuous, and one may not be able to hope the normal is accurate there. \n2. what is the reason for the artifacts (which I mentioned in weakness 4), is that because without supervision the model struggles to recover the details?\n3. to deal with 2, maybe add some smooth loss / geometric preserving loss on these areas that the normal prior is categorized as bad? \n4. I don't see a reason why this method only works indoor, has the author also tried it on outdoor datasets?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper is well-written and easy to follow. \n2. The normal deflection detection design is under reasonable assumptions and it offers a solution to deal with non-accurate normal prior. \n3. The angle deviation loss design fits the situation. \n4. The author provides complete experiments to show their method's performance. Including comparison with previous work quantitatively and qualitatively and ablation studies to show each module improves the results." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposed a method to improve prior normal-guided neural SDF indoor scene reconstruction. Instead of supervising the normal uniformly across the whole domain, the author proposes to use a network to learn the deflect normal and desired adaptive angle prior to loss according to the deviation of the deflect normal to the prior normal. The intuition is that large deviation happens in complex structure areas where the normal prior in general is bad quality. The author also proposes to use the re-weighted loss on normal, depth, and color rendering. The experiments show the proposed method performs well qualitatively and quantitatively." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The bad quality normal prior problem seems only solved half. The author reduced the loss weight on the part that the normal prior is not accurate, but not the other half, what to do to improve the method itself performance in these areas.\n2. Like the other prior-based methods, the quality of the prior matters. The author assumes smooth area the prior is relatively accurate. \n3. Like the other SDF-based methods, the reconstruction seems to still struggle with the thin structures. \n4. I noticed that in most cases the method outperforms previous methods, but sometimes it creates noise and artifacts." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See weakness." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The inaccuracy and bias are widely existed in monocular depth and normal priors, therefore, the idea of exploring the uncertainty of monocular clues to improve the reconstruction performance is reasonable and valuable.\n 2. The normal deflection field is effectively incorporated throughout the training process, enabling rendered deflected normal images to be supervised by estimated normals and adaptively applied in ray sampling, photometric loss, and unbiased rendering.\n3. The numerical and visualization results are attractive and impressive, which highly demonstrate the effectiveness of the proposed method.\n4. The ablation studies are thorough, clearly showcasing the effectiveness of each proposed module." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes ND-SDF, which learns a normal deflection field to reduce the significant bias in monocular normal and depth priors. The normal deflection field can be rendered through volume rendering to further guide adaptive prior loss, ray sampling and unbiased rendering. The numerical and visualization results are compelling." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Lack of citation. A similar approach of learning rotated normals to mitigate the bias between normal priors and ground truth normals was first proposed in NC-SDF[1]. While I know that NC-SDF has not released the source code so it’s difficult to directly compare with it, an appropriate clarification is necessary to distinguish the proposed idea from NC-SDF.\n2. Lack of quantitative comparisons with DebSDF. DebSDF also predicts the uncertainty of monocular priors and is able to reconstruct details such as chair legs. The numerical result of DebSDF is included in the table but visual comparison with DebSDF is absent, which is insufficient.\n\nThe methodology and experiments in this paper are solid. I like this paper, however, the two major concerns above prevent me from giving a higher score. I am willing to raise my score if the concerns are well addressed.\n\n[1]. Chen Z, Wu X, Zhang Y. NC-SDF: Enhancing Indoor Scene Reconstruction Using Neural SDFs with View-Dependent Normal Compensation. Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024: 5155-5165." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. 
For Figure 3, what about a comparison with the unbiased density functions in NeuS, TUVR, DebSDF, HF-NeuS[3], and NeuRodin (optional, because NeuRodin is concurrent work)[4]? These works also propose an unbiased density strategy. (A visual comparison would aid understanding.)\n\n2. In Figure 7, the deflection angle map appears to have higher values in complex structural regions. What does the visual comparison look like between the edge mask (EaNeuS), the NCC mask (Equation 6 in NeuRIS), and the uncertainty-aware masks (DDPNeRF[5], DebSDF, H2OSDF)?\n\n3. For Equation 12 of this paper, it seems that when $\\hat{\\mathbf{N}}^d(\\mathbf{r})$ and $\\hat{\\mathbf{N}}(\\mathbf{r})$ perfectly fit the prior $\\mathbf{N}(\\mathbf{r})$, this loss becomes exactly zero. The reviewer is concerned that this zero-loss situation does not seem to be a good state. Is there a more comprehensive explanation of this training objective and of why it helps address the concerns above?\n\n4. Does the proposed normal deflection field have some advantages over a normal uncertainty field, as in DebSDF and H2OSDF?\n\nReference:\n\n[3] HF-NeuS: Improved Surface Reconstruction Using High-Frequency Details. Wang et al. (Neurips 2022)\n\n[4] NeuRodin: A Two-stage Framework for High-Fidelity Neural Surface Reconstruction. Wang et al. (Neurips 2024)\n\n[5] Dense Depth Priors for Neural Radiance Fields from Sparse Input Views. Roessle et al. (CVPR 2022)" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper's writing is easy to follow.\n\n2. The experiments cover many different datasets for a comprehensive comparison, such as Scannet, TanksandTemples, Scannet++, and Replica.\n\n3. The ablation study indicates the effectiveness of the deflection field and the adaptive angle prior when compared to the base model." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces ND-SDF and proposes to learn a Normal Deflection field to represent the angular deviation between the scene normal and the normal prior.\n\nThe paper additionally introduces a ray sampling strategy based on the deflection angle to further improve the surface quality. The paper also proposes to use an unbiased function inspired by TUVR." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Major weakness:\n\n1. Although a variety of datasets is included, the compared methods are not comprehensive (except on Scannet). The quantitative comparison on TanksandTemples, Scannet++, and Replica only includes three works from 2022 (NeuRIS, MonoSDF, VolSDF) and one work from 2021 (Unisurf). This makes the experiments insufficient. For example, what about a comparison with HelixSurf, TUVR, DebSDF, and H2OSDF on TanksandTemples, Scannet++, and Replica? The reviewer thinks the comparison on TanksandTemples, Scannet++, and Replica is insufficient without newer baselines. Although Table 1 includes most newer baselines, usually only 4 scenes in Scannet are used for indoor scene reconstruction experiments. This is why the reviewer thinks that more quantitative experiments are required here.\n\nDetails: The reviewer understands that it's not necessary to run all previous baselines on all datasets; the following is just given as an example. 
Since the unbiased weight technique is used, the reviewer thinks **at least** TUVR(CVPR2023) is a newer baseline that should be included in both the quantitative and qualitative comparisons on TanksandTemples, Scannet++, Replica datasets. (But more comparison with other newer methods will enhance the paper's experiment part.)\n\n2. The qualitative comparison is also insufficient (only compared with older baselines). It's better to include the qualitative comparison with newer baselines in Figure 4, Figure 11, Figure 12, Figure 13.\n\n3. Some works have both pure MLP version and hash grid version (like MonoSDF). The performance and efficiency of different versions have some differences. Since ND-SDF includes instant-NGP in its implementation, it's better to clearly indicate the version to compare within the table. (For example, \"MonoSDF(mlp)\", \"MonoSDF(grid)\".)\n\nMinor weakness:\n\n4. For Section 3.4, similar ideas of better ray sampling strategy have been investigated in some previous work. Like edge-aware sampling in EaNeuS[1], GaussianRoom[2], and uncertainty-aware sampling in DebSDF. Please consider adding a brief discussion about these previous methods.\n\n5. In Figure 2, \"GT depth\" and \"GT Normal\" should be changed to \"depth prior\" and \"normal prior\".\n\n6. The number of selected scenes in each dataset for the experiment has not been reported.\n\n7. The dataset conducted in the ablation study has not been reported.\n\nReference:\n\n[1] Edge-aware Neural Implicit Surface Reconstruction. Li et al. (ICME 2023)\n\n[2] GaussianRoom: Improving 3D Gaussian Splatting with SDF Guidance and Monocular Cues for Indoor Scene Reconstruction. Xiang et al. (arxiv 2024)" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Learning Normal Deflection Fields for High-Fidelity Indoor Reconstruction" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024ndsdf,\ntitle={{ND}-{SDF}: Learning Normal Deflection Fields for High-Fidelity Indoor Reconstruction},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4HRRcqE9SU},\nnote={under review}\n}" }, "abstract": { "value": "Neural implicit reconstruction via volume rendering has demonstrated its effectiveness in recovering dense 3D surfaces. However, it is non-trivial to simultaneously recover meticulous geometry and preserve smoothness across regions with differing characteristics. To address this issue, previous methods typically employ geometric priors, which are often constrained by the performance of the prior models. In this paper, we propose ND-SDF, which learns a Normal Deflection field to represent the angular deviation between the scene normal and the prior normal. Unlike previous methods that uniformly apply geometric priors on all samples, introducing significant bias in accuracy, our proposed normal deflection field dynamically learns and adapts the utilization of samples based on their specific characteristics, thereby improving both the accuracy and effectiveness of the model. Our method not only obtains smooth weakly textured regions such as walls and floors but also preserves the geometric details of complex structures. In addition, we introduce a novel ray sampling strategy based on the deflection angle to facilitate the unbiased rendering process, which significantly improves the quality and accuracy of intricate surfaces, especially on thin structures. 
Consistent improvements on various challenging datasets demonstrate the superiority of our method." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Normal Deflection Fields", "High-Fidelity Indoor Reconstruction" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/98b35d836ddaea3b2dbfe627cd9ca78db6ed06ae.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/61b33d9dfb7ccb6053e2840a710eca961f72fa28.zip" }, "title": { "value": "ND-SDF: Learning Normal Deflection Fields for High-Fidelity Indoor Reconstruction" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4Hd7u3LHlZ
Primal-Dual Graph Neural Networks for General NP-Hard Combinatorial Optimization
main
Active
neural algorithmic reasoning;graph neural networks;combinatorial optimization
learning on graphs and other geometries & topologies
3;5;5;5
4;4;2;4
2;3;3;3
1;2;3;3
2;3;3;3
4.5
3.5
2.75
2.25
2.75
-0.333333
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Do the mean-max aggregation lines in table 1 use optimal solutions in training as well? I find it a little strange that max aggregation performs better than the algorithm without the optimal solutions. Shouldn't the problem-specific aggregations do better?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- If I understand correctly, this primal dual approach will necessarily yield a certificate of solution quality since the dual objective bounds the primal, which is not easy to obtain with neural nets on those problems.\n- Ablations and experiments on real world data are helpful." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes to simulate the primal-dual framework for approximation algorithms by Goemans and Williamson using a graph neural network. The paper follows the standard neural algorithmic reasoning blueprint with an encoder-processor-decoder approach and it leverages a gnn that performs message passing on a bipartite graph representation of the problem. The model is used to solve NP-hard problems such as minimum vertex cover, minimum hitting set, and minimum set cover. The model also leverages optimal solutions to the problem as additional supervision and is tested on real and synthetic graphs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "My main concerns are the following:\n- The experimental comparisons are inadequate. For example, MVC (and its complement problem of max independent set) have been tackled by several works in the literature (examples can be found in [1,2,3]). Warm-starting solvers has also been done (e.g., see 4). In my view, discussion and comparisons with some work from the literature on the core experiments as well as the warm start ones would help clarify where the model stands overall. Furthermore, the experiments on synthetic instances are rather small-scale (especially table 1). [2,3] provide an experimental setting with hard synthetic instances of larger sizes for MVC. It would be good to see how the model performs against some of those baselines on those instances.\n- On the topic of scale, it would be good to see what the performance looks like if the synthetic instances were scaled up an order of magnitude. Right now, it seems like it would take a lot of supervision to train a scalable version of this algorithm, especially if one wanted to leverage optimal solutions, which become costly for NP-hard problems as the instance size grows.\n- A central goal of this paper is to have a machine-learning model that closely aligns with the primal-dual framework. 
The numbers in Table 1 seem to agree when it comes to MVC and MHS, but for MSC we see that not having access to the optimal solutions seems to lead to results worse than the primal-dual algorithm itself. This points to the broader issue that even though the model is designed to closely align with this framework, the approximation guarantee is not preserved in practice.\n\nOverall, this is an interesting approach to neural combinatorial optimization. However, I find the empirical results unconvincing, and ultimately the algorithm does not seem to preserve the approximation guarantee in practice, so I am unsure about the contribution as a whole. At this stage, I cannot recommend acceptance, but I am open to reconsidering after the rebuttal.\n\n\n\n\n1. Khalil, Elias, et al. \"Learning combinatorial optimization algorithms over graphs.\" Advances in neural information processing systems 30 (2017).\n2. Brusca, Lorenzo, et al. \"Maximum independent set: self-training through dynamic programming.\" Advances in Neural Information Processing Systems 36 (2023)\n3. Yau, Morris, et al. \"Are Graph Neural Networks Optimal Approximation Algorithms?.\" arXiv preprint arXiv:2310.00526 (2023).\n4. Benidis, Konstantinos, et al. \"Solving recurrent MIPs with semi-supervised graph neural networks.\" arXiv preprint arXiv:2302.11992 (2023)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to \"Weakness\"." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper proposes an approach that is the first NAR method to surpass the algorithm it is trained on.\n2. The authors conducted several experiments with good numerical results favoring the proposed framework.\n3. Theoretical proofs are given to validate the effectiveness of the proposed framework." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes an approach that integrates the primal-dual method with GNNs to solve NP-hard combinatorial optimization problems more efficiently. \nIt establishes theoretical guarantees that the proposed framework can replicate the classical primal-dual method. \nBy incorporating the optimal solutions for these problems, the model surpasses the performance of conventional algorithms. \nAdditionally, by utilizing predictions as warm starts, the search time for commercial solvers can be reduced.\nNumerical results showcase the scalability and effectiveness of the proposed framework."
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While the proposed framework is designed to address NP-hard combinatorial optimization problems, the proposed framework primarily operates on linear programming (LP) relaxation, which appears to be an extension to existing methods, such as those referenced in [1] and [2]. \nBesides, there are also many works that directly predicts solutions to CO problems. e.g. [3] and [4].\nAn extended discussion, especially with experiments, on how the proposed approach differs from them would strengthen the authors' claims.\n\n[1]. Bingheng Li, Linxin Yang, Yupeng Chen, Senmiao Wang, Qian Chen, Haitao Mao, Yao Ma, Akang Wang, Tian Ding, Jiliang Tang, and Ruoyu Sun. Pdhg-unrolled learning-to-optimize method for large-scale linear programming, 2024.\n\n[2]. Ziang Chen, Jialin Liu, Xinshang Wang, Jianfeng Lu, and Wotao Yin. On representing linear programs by graph neural networks, 2023.\n\n[3]. Vinod Nair, Sergey Bartunov, Felix Gimeno, Ingrid von Glehn, Pawel Lichocki, Ivan Lobov, Brendan O’Donoghue, Nicolas Sonnerat, Christian Tjandraatmadja, Pengming Wang, Ravichandra Addanki, Tharindi Hapuarachchi, Thomas Keck, James Keeling, Pushmeet Kohli, Ira Ktena, Yujia Li, Oriol Vinyals, and Yori Zwols. Solving mixed integer programs using neural networks, 2021.\n\n[4]. Dinghuai Zhang, Hanjun Dai, Nikolay Malkin, Aaron Courville, Yoshua Bengio, and Ling Pan. Let the flows tell: Solving graph combinatorial optimization problems with gflownets, 2023." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "NA" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- The three problems are closely related. Can the method be applied on other problems that can work with the primal-dual algorithm?\n\n- In table 2, why use geometric mean for total time while arithmetic mean for optimal time?\n\n- what is the relation of PDGNN and GAT, GCN and SAGE? How are they combined as shown in Table 4? What are the inputs and outputs of each module? Since the main text does not mention how are PDGNN works with other models, the results confused me.\n\n- what does the claimed OOD generalizability comes from? There is no module designed for the purpose. So it confuses me why the method has good OOD generalizability.\n\nSince I am not an expert in these problems, I would consider raising my score based on the rebuttal." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Writing is good with many details. The method looks sound. Experiments are comprehensive." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper design a GNN model based on the primal-dual algorithm to solve CO problems including Minimum Vertex Cover, Minimum Set Cover, and Minimum hitting set. 
The trained model can be used to warm-start commercial solvers, and it is also applicable to other problems closely connected with the studied problems, e.g., node classification." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- To me, it is not clear whether the work is important in the literature, especially in dealing with the three problems.\n\n- Important baselines are missing. For example, other neural solvers for the three problems are not provided. GIN is not designed for solving these problems, so the comparison with GIN alone can be unfair. To demonstrate the effectiveness, it is necessary to include more solvers for these problems.\n\n- The authors give results of model performance on OOD data in Table 2. However, since the authors do not give results for other baselines, the OOD generalizability cannot be demonstrated directly. By the way, no OOD results on the minimum set cover problem are provided." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Refer to \"Weaknesses\".\n\n2. In the experiments, how many layers does PDGNN have?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The observation that the primal-dual algorithm naturally aligns with message-passing GNNs is interesting. The integer programming problem studied in this paper is quite general. The writing is clear and easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a new GNN architecture, termed PDGNN, for solving the integer program $\\min \\vec{w}^T\\cdot\\vec{x}$ s.t. $A\\vec{x}\\geq \\vec{1}$ and $x_i\\in \\{0,1\\}$, which can model many combinatorial optimization problems, such as set cover and hitting set. This GNN is designed by unrolling a primal-dual approximation algorithm. Numerical experiments are conducted to evaluate the performance of PDGNN." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. About Section 1.1: The phenomenon that GNNs designed by unrolling an algorithm surpass the performance of the algorithm itself is not surprising (since the algorithm is an instantiation of the GNN, the GNN has enough expressive power to surpass it). In fact, the phenomenon is general: see, e.g., \"PDHG-unrolled learning-to-optimize method for large-scale linear programming\" in ICML 2024. Since in real-world scenarios PDGNN will commonly be used as a warm start, the paper could highlight how much computational time can be saved by employing PDGNN as a warm start.\n\n2. As shown in Table 3, by employing PDGNN as a warm start, the commercial solver achieves a $\\leq 1.1\\times$ speedup compared to the vanilla version. 
The improvement did not impress me, as I had been expecting a $\\geq 2\\times$ speedup.\n\n3. The paper proves that PDGNN can exactly replicate the primal-dual algorithm, but lacks explicit theoretical guarantees on the efficiency and effectiveness of PDGNN. For example, I would like to see theorems like \"PDGNN of xx layers has enough expressive power to output an xx-approximate solution for xxx problems\", or discussions on the challenges in deriving such theoretical guarantees or intuitions about what kind of guarantees might be possible given the current framework." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a general GNN-based framework using primal-dual approximation algorithms to approach NP-hard combinatorial optimization problems, advancing neural algorithmic reasoning beyond the polynomial-time-solvable domain." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024primaldual,\ntitle={Primal-Dual Graph Neural Networks for General {NP}-Hard Combinatorial Optimization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4Hd7u3LHlZ},\nnote={under review}\n}" }, "abstract": { "value": "Neural algorithmic reasoning (NAR) seeks to train neural networks, particularly Graph Neural Networks (GNNs), to simulate and generalize traditional algorithms, enabling them to perform structured reasoning on complex data. Previous research has primarily focused on polynomial-time-solvable algorithmic problems. However, many of the most critical optimization problems in practice are NP-hard, exposing a critical gap in NAR. In this work, we propose a general GNN-based framework for NP-hard optimization problems, built on the classical primal-dual framework for designing efficient approximation algorithms. We enhance this framework by integrating optimal solutions to these NP-hard problems, enabling the model to surpass the performance of the approximation algorithms it was initially trained on. To the best of our knowledge, this is the first NAR method explicitly designed to surpass the performance of the classical algorithm on which it is trained. We evaluate our framework on several NP-hard combinatorial optimization problems, demonstrating its ability to generalize to larger and out-of-distribution graph families. In addition, we demonstrate the practical utility of the framework in two key applications: as a warm start for commercial solvers to reduce search time, and as a tool to generate embeddings that enhance predictive performance on real-world datasets. Our results highlight the scalability and effectiveness of GNNs for tackling complex combinatorial optimization problems, advancing their utility beyond the scope of traditional polynomial-time-solvable problems." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "neural algorithmic reasoning", "graph neural networks", "combinatorial optimization" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/8f7ac14732a0b6a8fd1789254c6c91eaff7868f7.pdf" }, "presentation": null, "primary_area": { "value": "learning on graphs and other geometries & topologies" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Primal-Dual Graph Neural Networks for General NP-Hard Combinatorial Optimization" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4ILqqOJFkS
SPikE-SSM: A Sparse, Precise, and Efficient Spiking State Space Model for Long Sequences Learning
main
Active
state space models;spiking neural network;long sequence modeling;language modeling
foundation or frontier models, including LLMs
3;3;5
5;5;5
2;2;3
1;2;3
2;2;2
3.666667
5
2.333333
2
2
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. The author's point-by-point discussion is well-executed but lacks an explanation of the logical relationships between the three different issues, which makes them feel somewhat disconnected. What is the logical relationship among these three issues—is it progressive or parallel?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The introduction of the PMBC strategy enables parallel processing in SNNs, representing an innovative advancement. Additionally, the proposed LIF neuron model incorporates a reset-refractory mechanism, enhancing both biological interpretability and dynamic computational capabilities.\n2. The article is well-structured, making the authors' arguments and experimental content easy to follow. The experimental results partially demonstrate the suitability of the proposed method as an SNN architecture for long-sequence tasks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents SPikE-SSM, a novel spiking state space model designed to address key challenges in long-sequence learning with spiking neural networks (SNNs). The authors introduce a boundary compression strategy (PMBC) to accelerate spiking neuron model inference, enabling parallel processing for long sequence learning. Additionally, they propose a new LIF neuron model with a reset-refractory mechanism to exploit the temporal dimension for biologically interpretable dynamic computation. The model's evaluation on long-range arena benchmarks and the large language dataset WikiText-103 demonstrates the potential of dynamic spiking neurons for efficient long-sequence learning." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The scalability of SPikE-SSM to larger datasets and more complex tasks has not been thoroughly discussed. Additionally, there is a lack of comparison with non-SSM-based SOTA SNN architectures in terms of computational efficiency and energy consumption. \n2. The presentation of PMBC in Figure 2 is unclear, making it difficult for readers to grasp the design rationale and core concepts being conveyed.\n3.What does \"x,h,x,...,x\" represent in Figure 3? It is not explained in the caption, which may lead to confusion.\n4. I couldn't find a complete diagram of the network architecture, nor any information on parameters or settings, in either the main text or the appendix.\n5. To my knowledge, there are several existing strategies for parallel training of SNNs[1][2], which the authors did not compare in this paper. What are the advantages of the proposed approach compared to these existing methods?\n[1] Fang, Wei, et al. 
\"Parallel spiking neurons with high efficiency and ability to learn long-term dependencies.\" Advances in Neural Information Processing Systems 36 (2024).\n[2] Zhang, Shimin, et al. \"Tc-lif: A two-compartment spiking neuron model for long-term sequential modelling.\" Proceedings of the AAAI Conference on Artificial Intelligence. Vol. 38. No. 15. 2024." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. In Figure 1, the input to the GLU is a floating-point number. Is the multiplication of floating-point numbers unavoidable in this case? Or is there a more effective way to replace this module?\n2. While the article introduces sparsity through the reset mechanism and refractory period, is this nonlinear transformation interpretable? Specifically, how do the reset modules and refractory periods enhance model performance?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. A method similar to fixed-point iteration is proposed to solve the output based on the input sequence efficiently. This method can run in parallel, accelerating the training process.\n2. Better accuracy and sparsity compared with other spikingSSMS." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a spiking neural network model based on SSM. It employs a method similar to fixed-point iteration to address the challenge of training the reset mechanism in parallel. Additionally, the model introduces a refractory period for neurons, enhancing biological interpretability and resulting in sparser spike patterns." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. In Figure 1, the input to the GLU is a floating-point number. Is the multiplication of floating-point numbers unavoidable in this case? Or is there a more effective way to replace this module?\n2. While the article introduces sparsity through the reset mechanism and refractory period, is this nonlinear transformation interpretable? Specifically, how do the reset modules and refractory periods enhance model performance?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "The paper presents an incremental improvement over existing SNN-SSM methods but lacks the novelty, theoretical rigor, and experimental results to justify acceptance. While the proposed PMBC strategy and refractory neuron model are interesting, their practical benefits are not convincingly demonstrated. 
Additionally, the paper’s clarity issues further detract from its overall impact. A stronger focus on theoretical backing, real-world applications, and improved clarity would be needed for future consideration." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Detailed questions you can refer to weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The proposed refractory neuron model is biologically inspired and brings interpretability to SNNs for long-sequence tasks. The experiments are well-structured, with results on a wide range of tasks (LRA benchmarks and WikiText-103) showing some promise." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents SPikE-SSM, a framework that aims to integrate spiking neural networks (SNNs) with state space models (SSMs) to address the challenges of long-sequence learning in an energy-efficient and sparse manner. The authors introduce several innovations such as the Parallel Max-Min Boundary Compression (PMBC) strategy for accelerating SNN inference, a refractory neuron model for temporal dynamics, and the integration of trainable thresholds for improved sparsity-accuracy trade-offs. Extensive experiments on LRA benchmarks and WikiText-103 demonstrate improved performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The core contribution—integrating SNNs with SSMs—is somewhat incremental. There are previous works, such as SpikingSSM, that already explore similar ideas. The novelty of the proposed solutions does not clearly differentiate it from existing methods, especially since the claimed improvements (like PMBC) are marginally better in terms of accuracy. The paper proposes several assertions and claims without sufficient theoretical backing. The proofs in the appendices are not rigorous enough and seem more heuristic than formal. This weakens the impact of the PMBC algorithm, which is a major part of the contribution. While the authors test their approach on multiple benchmarks, the results do not show significant improvements over previous methods. In many cases, the accuracy gains are minimal (sometimes less than 1%) compared to competing models, particularly when considering the energy-efficiency trade-offs. Furthermore, the benchmarks chosen (such as sMNIST) are relatively simple, and the performance on more challenging real-world tasks could have been more extensively evaluated.\n\nOne of the key selling points of SNNs is their energy efficiency. However, while the authors present some energy consumption estimates, these are based on theoretical assumptions (e.g., number of operations) rather than real-world implementations on neuromorphic hardware. Without real hardware measurements, the claimed energy benefits are speculative and diminish the practical relevance of the paper. The paper is dense and difficult to follow, particularly in sections where the PMBC algorithm and the refractory neuron model are introduced. The text often lacks clarity, and it is not always clear how the various components fit together. 
This makes the methodology hard to replicate." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "The work propsed a sparse, precise and efficient spiking state space model for long sequence learning, using parallel boundary compression and refractory spiking neurons." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024spikessm,\ntitle={{SP}ikE-{SSM}: A Sparse, Precise, and Efficient Spiking State Space Model for Long Sequences Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4ILqqOJFkS},\nnote={under review}\n}" }, "abstract": { "value": "Spiking neural networks (SNNs) provide a low-power, energy-efficient solution by utilizing the spike-based and sparse nature of biological systems. Since the advent of Transformers, SNNs have struggled to compete with artificial networks on long sequential tasks, until the recent emergence of state space models (SSMs), which offer superior computational efficiency and modeling capability. However, applying the highly capable SSMs to SNNs for long sequences learning poses three major challenges: ❶ The membrane potential is determined by the past spiking history of the neuron, leading to reduced efficiency for sequence modeling in parallel computing scenarios. ❷ Complex dynamics of biological spiking neurons are crucial for functionality but challenging to simulate and exploit effectively in large networks. ❸ It is arduous to maintain high sparsity while achieving high accuracy for spiking neurons without resorting to dense computing, as utilized in artificial neuron-based SSMs. To address these challenges, we propose a sparse, precise and efficient spiking SSM framework, termed SPikE-SSM. For ❶, we propose a boundary compression strategy (PMBC) to accelerate the inference of the spiking neuron model, enabling parallel processing for long sequence learning. For ❷, we propose a novel and concise neuron model incorporating reset-refractory mechanism to leverage the inherent temporal dimension for dynamic computing with biological interpretability. For ❸, we hierarchically integrate the proposed neuron model to the original SSM block, and enhance the dynamics of SPikE-SSM by incorporating trainable thresholds and refractory magnitudes to balance accuracy and sparsity. Extensive experiments illustrate the effectiveness and robustness of SPikE-SSM on the long range arena benchmarks and large language dataset WikiText-103, showing the potential of dynamic spiking neurons in efficient long sequence learning. The code will be publicly available." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "state space models", "spiking neural network", "long sequence modeling", "language modeling" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/9db2f9cc7f1d58a3162949512fe50d38467c2c6e.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "SPikE-SSM: A Sparse, Precise, and Efficient Spiking State Space Model for Long Sequences Learning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
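For context on the SPikE-SSM reviews above: several reviewers refer to the reset-refractory LIF dynamics and to why they resist parallel training. The Python sketch below is illustrative only and is not taken from the submission; it simulates a generic LIF neuron with a hard reset and a fixed refractory window, and the parameter names and values (decay, threshold, refractory_steps) are assumptions. The step-to-step dependence on the membrane state and the refractory counter is exactly what makes naive parallelization over time difficult, which is the issue the paper's PMBC strategy (not shown here) is said to address.

import numpy as np

def lif_simulate(inputs, decay=0.9, threshold=1.0, refractory_steps=2):
    # Sequential LIF neuron with hard reset and a fixed refractory window.
    v, refrac, spikes = 0.0, 0, []
    for x in inputs:
        if refrac > 0:              # refractory: ignore input, emit no spike
            refrac -= 1
            spikes.append(0)
            continue
        v = decay * v + x           # leaky integration of the input current
        if v >= threshold:          # fire, hard-reset the membrane, start refractory period
            spikes.append(1)
            v = 0.0
            refrac = refractory_steps
        else:
            spikes.append(0)
    return np.array(spikes)

print(lif_simulate(np.random.default_rng(0).uniform(0.0, 0.6, size=30)))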
4IRYGvyevW
Beyond the Lazy versus Rich Dichotomy: Geometry Insights in Feature Learning from Task-Relevant Manifold Untangling
main
Active
Computational neuroscience;storage capacity;neural manifolds;representational geometry;rich and lazy learning;training dynamics;feature learning
applications to neuroscience & cognitive science
3;5;5;6;6
3;2;3;3;3
1;3;2;3;2
1;3;2;3;3
2;2;2;2;3
5
2.8
2.2
2.4
2.2
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Regarding the proposed effective geometric measures to explain capacity changes, are there standard reference lengths compared to the radius? A simple manifold radius magnitude may not accurately capture the problem's complexity when the differences between manifold means scale identically.\n- Additionally, the definitions and implications of axes alignment, center alignment, and center-axes alignment are less discussed compared to radius and dimensionality (e.g., in Figures 5b, 6c).\n- When using geometric measures, which layer(s) should be analyzed? Are the results consistent across different layers?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The novel application of manifold capacity and other effective geometric measures to investigate the lazy-vs-rich dichotomy is intriguing. It shows better alignment with the degree of feature learning compared to other metrics, such as weight changes or NTK-label alignments.\n- Most of the derivations seem correct, though I could not verify every detail." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a geometric framework to examine the manifold of neural network representations, providing insights into the distinctions between lazy and rich training regimes in feature learning. Specifically, it revisits the manifold capacity concept introduced in Chung et al. (2018), theoretically demonstrating that manifold capacity can serve as an indicator of the underlying richness in feature learning. Based on empirical studies using synthetic data and two-layer neural networks, observations are made regarding the relationship between manifold capacity and the degree of feature learning, as well as the stages of feature evolution. The proposed geometric measures are further applied to neural networks in neuroscience and out-of-distribution generalization tasks to explore the broader implications of this approach." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The theoretical derivation relies on a one-step gradient argument, but the fact is not mentioned in the manuscript. Moreover, the link between Theorem 2’s results and the increase in feature learning degree is not entirely clear, and additional commentary could enhance clarity.\n- It would be helpful to discuss the generalizability of the observations, such as those in Figure 4. Various hyperparameters (e.g., the choice of optimization algorithms, weight initialization methods, batch size, learning rate, and scheduling) could influence implicit biases in the algorithm, affecting neural representations, geometric metrics, and even the stages of learning." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "-\tThere seems to be no explanation for Figure 2c. I don’t know what does those operations mean.\n-\tI’m a bit confused about eq (1), the definition of model capacity. Are the manifold $\\{\\mathcal{M}_i\\}$ predefined or they are changing when $N$ is increasing. If they are changing, how do you choose them? Also, what values should $y_i$ take?\n-\tWhen looking at Appendix C for the way to compute manifold capacity, several notions seem to be not defined, such as $T$, $\\lambda$.\n\n-\tI believe the way of computing manifold capacity should be mentioned in the main text, or at least mentioned under what conditions are those values computed (I believe they cannot be exactly computed unless some conditions are assumed).\n\n-\tFor Theorem 1, when looking at the actual statement in appendix, these results are proved only under the setting that with one gradient step update. Though it is understandable that going beyond this is technically difficult, I feel the statement in the main text gives the reader a wrong impression.\n\n-\tIn section 3.2/figure 3b, it’s not clear to me what the definition of wealthy and poor regime are and how they are related to the input dimension. Also, I’m not sure why at initialization there will be task-relevant features (and I don’t know what are task-relevant features in this setting). Moreover, the purple line in capacity in figure 3b changes only a little bit throughout the training, I’m not sure how to interpret this (feature learning only changes a little?).\n\n-\tIt’s a bit hard for me to follow section 4, presumably because the definition of these metric (e.g., dimension, radius,…) are missing.\n\n\nTypo:\n\n-\tLine 173, $o_N(1) \\to 1$ -> $o_N(1) \\to 0$" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "-\tUnderstanding feature learning in deep learning is an important and interesting research problem.\n-\tThe paper proposes a metric called manifold capacity to measure the feature learning progress, which seems to be new.\n-\tSeveral experiments in different domains are presented to support the claim." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors focus on the feature learning in deep learning. They use manifold capacity as a metric to quantify the degree of richness of feature learning. Experiment results show that such capacity can reveal different learning stages in different settings. They also apply this to problems in neuroscience and out-of-distribution tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "See questions section below." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "In Section 2.1, what does \"i-th input category\" mean? In eq. (1), $P$ is the variable of the $\max$ operator, and for any $i \in [P]$, $\mathcal M_i$ is defined, and $\mathcal M_i$ is defined by $\mathcal X_i$. This seems to imply that $\mathcal X_i$ is also part of the variable of the $\max$ operator, rather than pre-defined. Is this true?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The idea of capturing richness via manifold capacity is potentially useful.\n- The experiments verify that manifold capacity captures different stages of learning." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes that manifold capacity is a better way to identify the process of feature learning than the commonly used rich-lazy dichotomy. This claim is theoretically proved in a limited setting, and empirically verified." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The presentation is very confusing. For example, in the experiments, there are several critical measures, such as effective radius, dimension, center alignment, etc., but they are not defined (at least not in the main paper).\n- The main paper claims that Theorem 1 is proved for a 2-layer NN. However, the NN used in the proof is actually different from what people would expect to be a \"2-layer NN\", since, based on Assumption 1: 1) the second layer is not trained but fixed at random values; and 2) the choice of activation function is very limited, as there is a rather strong condition on the activation function." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "What does \"untangling\" specifically mean? \n\nWhat are the implications of the 2-layer results for deeper neural networks?"
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "I think the introduction of manifold capacity as a metric for representation learning is interesting for the ML community. They relate manifold capacity to test accuracy in 2-layer networks and deeper networks empirically. They also relate it to other measures such as weight changes and alignment, though this is mostly in the appendix." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper advocates the use of manifold capacity as a measure for classifying the regime of neural networks. Manifold capacity is a measure from the neuroscience literature that quantifies the performance of a linear classifier over features, used here as a measure of the richness of the learned features. The authors advocate this measure over other measures such as \"lazy regimes\" during which features show limited change in learning. They use this to analyze 2-layer ReLU networks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper is haphazardly written. The difference between previous work and their problem statement is not well delineated. Their goals aren't succinctly stated. The main theorem statement is nearly vacuous in the main paper: \"In 2-layer neural networks trained with gradient descent in the rich regime the changes in capacity track the underlying degree of richness in feature learning.\" Since richness itself is defined as capacity, this is either circular or vacuous. \n\nA deeper read of the result in the appendix shows that they approximate the result of gradient descent and use a Gaussian model to approximate a 2-layer network. This is perhaps the most interesting result in the paper but is not at all covered in the main paper. The authors should state that they prove that using SGD, models with high capacity converge to high accuracy or something less vacuous and offer the proof. \n\nOther features such as manifold radius and \"alignment\" are also not measured. The connection to \"untangling\" is also not mentioned. Weight changes are used mainly as a strawman to invoke connections to NTK and need not be done. \n\nFigure 2 is very confusing with a nonsensical caption, \"Higher capacity means that a higher number of manifolds per neuron can be packed in the neural state space.\" What does it mean to \"pack manifolds into neurons\"? This kind of shoddy language obfuscates the message to the reader. \n\nAs the current writing stands, this seems to contribute no more than (and, by way of added confusion, likely less than) the 2018 work by Chung et al., which describes the full intuition of manifold capacity and outlines its use both in neuronal and neural networks." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed."
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Could the authors provide an example of the lazy regime? Specifically, how can a neural network be trained without actually modifying its internal features?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The authors present both theoretical analysis and empirical evidence demonstrating that manifold capacity effectively quantifies the degree of feature learning. Comparisons with other metrics, such as accuracy and weight changes, further underscore the efficacy of manifold capacity in this context.\n2. The authors offer insightful empirical findings on the relationship between manifold geometry and learning stages, with applications in neuroscience and OOD detection. Robust experiments substantiate these findings, providing a solid foundation for their conclusions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a novel framework for understanding neural network feature learning beyond the “lazy” versus “rich” dichotomy by focusing on task-relevant manifold untangling using representational geometry. The authors introduce manifold capacity as a key metric to quantify feature learning richness, and explore how manifold geometric measures reveal distinct learning stages and strategies. This framework is applied in contexts ranging from standard machine learning tasks to neuroscience, offering insights into structural inductive biases and challenges in out-of-distribution (OOD) generalization." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The structure needs refinement, as it currently feels like the authors have packed too much content into the main paper, resulting in diminished clarity. The main paper covers a wide range of topics—from manifold capacity to manifold geometry, from theory to experiments—and the relationship between manifold capacity and various manifold geometry measures is weakly explained, relying primarily on Figure 2c with minimal analysis. I suggest that the authors avoid treating manifold geometry as a separate section, even though some interesting findings are presented. An alternative approach would be to frame geometry as an extension of capacity (or to present capacity itself as a facet of manifold geometry) and to provide necessary analysis (I notice there is analysis in the appendix. It's better to consolidate relevant analysis from the appendix into a concise summary in the main paper).\n2. The algorithm is not clearly presented, which also appears to be a side effect of the structural issue noted above. Given that the paper aims to provide practical insights into feature learning, with applications in neuroscience and machine learning, it is important to ensure readability for readers unfamiliar with the manifold background. Thus, a clear algorithm outlining the process for computing capacity during training would be essential." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Quantifying feature learning beyond lazy versus rich through manifold capacity and representational geometry." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024beyond,\ntitle={Beyond the Lazy versus Rich Dichotomy: Geometry Insights in Feature Learning from Task-Relevant Manifold Untangling},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4IRYGvyevW},\nnote={under review}\n}" }, "abstract": { "value": "The ability to integrate task-relevant information into neural representations is a fundamental aspect of both human and machine intelligence. Recent studies have explored the transition of neural networks from the *lazy* training regime (where the trained network is equivalent to a linear model of initial random features) to the *rich* feature learning regime (where the network learns task-relevant features). However, most approaches focus on weight matrices or neural tangent kernels, limiting their relevance for neuroscience due to the lack of representation-based methods to study feature learning. Furthermore, the simple lazy-versus-rich dichotomy overlooks the potential for richer subtypes of feature learning driven by variations in learning algorithms, network architectures, and data properties.\n\nIn this work, we present a framework based on representational geometry to study feature learning. The key idea is to use the untangling of task-relevant neural manifolds as a signature of rich learning. We employ manifold capacity—a representation-based measure—to quantify this untangling, along with geometric metrics to uncover structural differences in feature learning. Our contributions are threefold: First, we show both theoretically and empirically that task-relevant manifolds untangle during rich learning, and that manifold capacity quantifies the degree of richness. Second, we use manifold geometric measures to reveal distinct learning stages and strategies driven by network and data properties, demonstrating that feature learning is richer than the lazy-versus-rich dichotomy. Finally, we apply our method to problems in neuroscience and machine learning, providing geometric insights into structural inductive biases and out-of-distribution generalization. Our work introduces a novel perspective for understanding and quantifying feature learning through the lens of representational geometry." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Computational neuroscience", "storage capacity", "neural manifolds", "representational geometry", "rich and lazy learning", "training dynamics", "feature learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/50738ce430af44d4b48fd2f40874dc6db9646568.pdf" }, "presentation": null, "primary_area": { "value": "applications to neuroscience & cognitive science" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Beyond the Lazy versus Rich Dichotomy: Geometry Insights in Feature Learning from Task-Relevant Manifold Untangling" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
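For context on the reviews above, which ask what manifold capacity measures and how it is computed: the Python sketch below is illustrative only and is not taken from the submission. It computes a brute-force proxy, the fraction of random binary labelings of point-cloud "manifolds" that a linear classifier can fit exactly. This is related in spirit to, but much cruder than, the mean-field capacity estimator of Chung et al. (2018) that the paper builds on; all shapes, parameter values, and names here are assumptions for illustration.

import numpy as np
from sklearn.svm import LinearSVC

def separable_fraction(clouds, n_dichotomies=100, seed=0):
    # clouds: (P, M, N) array of P point clouds ("manifolds"), each with M points in N dimensions.
    # Returns the fraction of random binary labelings of the P manifolds that a linear
    # classifier separates perfectly (a crude separability proxy, not the mean-field capacity).
    rng = np.random.default_rng(seed)
    P, M, N = clouds.shape
    X = clouds.reshape(P * M, N)
    hits = 0
    for _ in range(n_dichotomies):
        labels = rng.integers(0, 2, size=P)
        while labels.min() == labels.max():   # make sure both classes appear
            labels = rng.integers(0, 2, size=P)
        y = np.repeat(labels, M)              # every point inherits its manifold's label
        clf = LinearSVC(C=1e4, max_iter=20000).fit(X, y)
        hits += clf.score(X, y) == 1.0
    return hits / n_dichotomies

rng = np.random.default_rng(1)
centers = rng.standard_normal((10, 1, 80))
clouds = centers + 0.2 * rng.standard_normal((10, 20, 80))   # small manifolds around random centers
print(separable_fraction(clouds))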
4IYdCws9fc
REFINE: Inversion-Free Backdoor Defense via Model Reprogramming
main
Active
Backdoor Defense;Model Reprogramming;Backdoor Attack;AI Security
alignment, fairness, safety, privacy, and societal considerations
5;5;5;5
4;4;4;3
2;2;3;3
3;2;2;2
4;3;2;2
5
3.75
2.5
2.25
2.75
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "In Figure 3, why did the color of the part of the image outside the trigger also change?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Extensive experiments demonstrating the effectiveness of REFINE across different datasets.\n\n- Thorough ablation studies validating each component's contribution" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper propose an inversion-free backdoor defense method based on model reprogramming, REFINE. REFINE consists of an input transformation module and an output mapping module. The key idea is to transform both input and output domains to break the trade-off between model utility and defense effectiveness faced by existing pre-processing based defenses." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The connection between motivation and the proposed methods is not very close. However, the analysis of BTI-based defenses does not involve this domain-based perspective. The paper didn’t discuss whether and how the purification process in BTI-based defenses alter the image domain.\n\n- The theoretical analysis through Theorem 1 effectively explains the limitations of transformation-based defenses by quantifying how domain transformations affect defense performance.\n\n- Experiments mainly focus on ResNet. Conducting experiments under different network structures is beneficial for verifying the effectiveness of the proposed method, for example, VGG, DenseNet etc.\n\n- Requiring 20% clean data may be impractical in many real-world scenarios.\n\n- The proposed method requires training a U-Net, which involves a significant computational cost." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. The two sentences (i.e., \"by first obtaining ... outcomes\" and \"the internet ... poisoned samples)\") in the Section 1 contradict each other. What is the difference between prior information and prior knowledge?\n2. This paper focuses on the dirty-image backdoor attacks. 
But considering the recent clean-image backdoor attacks that do not have trigger features, can the proposed defense method work?\n3. In Section 3.2, the \"Pad Size\" and \"Mask Ratio\" are not defined beforehand; it is necessary to clarify them.\n4. In this paper, there are many long sentences, which makes it hard to understand, such as \"Additionally, we treat ... the transformation\" on Page 4.\n5. In Section 3.2, the authors did not specify the dataset used for the limitation analysis. Please clarify it.\n6. In Section 4.2.1, the authors state that traditional model reprogramming methods are insufficient to remove triggers. I suggest that the authors explain in detail why traditional methods are insufficient.\n7. The discussion of the feature distance changes in the proposed input transformation module is lacking. It would be better to add it to highlight the function of this module." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. A theoretical analysis demonstrates that the effect of backdoor defenses is bounded by the distance of the output features before and after the preprocessing. Therefore, existing methods cannot break the trade-off between model utility and defense effectiveness.\n2. The proposed method is novel and interesting. By integrating model reprogramming techniques, the authors only need to change the model input, not the model parameters, to achieve backdoor elimination, and this does not affect the original performance of the model." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper analyzes existing pre-processing-based backdoor defense methods and their limitations. Then, a simple yet effective method is proposed. This method utilizes model reprogramming techniques rather than model retraining, which not only eliminates backdoor attacks but also maintains the model's performance. The evaluation against different backdoor attacks also demonstrates the effectiveness of the proposed method." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The authors discuss the pre-processing defense methods, i.e., input-transformation defenses and BTI-based defenses, and analyze their limitations in detail. However, the proposed method actually belongs to the input-transformation-based category. This paper also spends a large amount of time analyzing and comparing BTI methods with the proposed method, which makes it hard to read.\n2. This paper assumes access to an unlabeled dataset that is independent and identically distributed with respect to the training dataset of the pre-trained model. The authors need to clarify how to acquire this dataset. In addition, since the performance of model reprogramming methods is related to the distribution of the input data, if the authors cannot obtain data with the same distribution, will it affect the performance of the model?\n3. The experiments are insufficient. REFINE is only evaluated with the ResNet-18 model. Evaluation on more complex models (such as ResNet-50) or other architectures (such as InceptionV3 or VGG16) is lacking."
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Please improve the soundness of the limitation study.\n2. Please discuss the reasonableness of the categories of the mentioned defenses.\n3. Please explain the different experimental results." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. Revisit the pre-processing defenses against backdoor attacks and reveal their limitations. Pre-processing-based defense is important for protecting model security without changing the model structure or weights.\n2. Propose a pre-processing defense against backdoor attacks, which seems to be simple but effective.\n3. Conduct extensive experiments to demonstrate the effectiveness of the proposed defense." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes REFINE, an inversion-free backdoor defense method based on model reprogramming. The authors first revisit existing pre-processing-based backdoor defenses and reveal their limitations. The proposed defense, REFINE, introduces trainable input transformation and output mapping modules for reprogramming and incorporates cross-entropy and supervised contrastive losses to enhance defense performance. Extensive experiments are conducted to validate the effectiveness of REFINE." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The claims about the limitations of prior works are subjective and confusing. For the first limitation, the authors think \"transformation-based backdoor defenses methods face a trade-off between utility and effectiveness\". So, can the proposed defense overcome this limitation? From the design and experimental results, REFINE also suffers from the same problem. Otherwise, the BA with REFINE should be the same as that of the original model. Moreover, the authors try to utilize experiments to validate their claims. However, the experimental setting is not fair enough. For example, the authors mention ShrinkPad as the baseline. However, in that paper, ShrinkPad is not the best defense method. Also, for the second limitation, the authors do not analyse the SOTA work, and the reported experimental results differ from those in the original paper, e.g., the experiments with BADNET shown in Fig. 3.\n2. The categories of the mentioned defenses are not sound enough. According to the manuscript, the BTI-based defense (e.g., NC) reverses the backdoor trigger to eliminate the trigger in the inputs. However, this kind of defense does not only work in this way. Thus, the comparison does not seem fair enough.\n3. The experiments do not include some SOTA works, e.g., [1, 2]. Moreover, the experimental results differ considerably from those in the original paper, e.g., BTI-DBF. 
Please explain why.\n[1] Neural Polarizer: A Lightweight and Effective Backdoor Defense via Purifying Poisoned Features\n[2] Black-box Backdoor Defense via Zero-shot Image Purification" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "See weakness part." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The idea may seem bold and imaginative, but it is worth exploring in depth. I believe both the threat model setup and the proposed solution are good.\n2. Overall, the paper is well-written, making it relatively easy for someone unfamiliar with model reprogramming, like myself, to grasp the design concepts.\n3. The method operates effectively with a small amount of unlabeled data, which not only cleverly avoids direct comparison with fine-tuning-based defenses but also demonstrates its practicality through ablation experiments that confirm its minimal data requirements." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses the challenge of backdoor attacks in deep neural networks, where current defenses struggle to balance accuracy and trigger removal. The proposed REFINE method combines input transformation and output remapping to effectively neutralize backdoors without relying on trigger inversion. Experiments show REFINE's strong defense capabilities while maintaining model performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.Regarding the theoretical explanation in the paper (Formula 1), I feel it’s missing a key component and has only achieved about 50% of its purpose. It aims to explain how amplifying the distributional differences in output features changes the variation in prediction results. As the authors emphasize, model reprogramming significantly amplifies these distributional differences, and trigger patterns can be randomized. I agree with this point, but at this stage, the performance guarantee for clean samples doesn’t tie back to this theory; I see it as being empirically supported by the two losses. In fact, even with model reprogramming, Formula 1 still holds, so it may not fundamentally support the authors’ viewpoint (since Formula 1 is a rule that must be met across the overall distribution; I believe focusing on local distributions could be a potential solution).\n\n2.The whole experiments are only conducted on a ResNet-18 model. Furthermore, the discussion of the black-box part in the experiments seems counterproductive to me; I don’t understand the rationale behind setting up absurdly similar black-box and substitute models for exploration.\n\n3.The adaptive attack setup is unreasonable. 
Based on the general consensus in the community, in adaptive attacks, the attacker is assumed to know the defender’s hard-coded choices. Disallowing the defender from making real-time adjustments (which is a strategy directly aimed at adaptive attacks) is therefore a logical error. Additionally, the authors haven’t clearly explained why a conflict exists between adaptive loss and backdoor loss, resulting in a decrease in BA, and I don’t clearly see a trade-off here.\n\n4.Why is BTI-DBF unable to reverse even the most basic BadNet trigger? I seriously question the validity of this experiment. In the original BTI-DBF paper, it’s evident that it can reconstruct the triggers of certain fixed samples quite effectively.\n\n5.Additionally, I would like to see more examples of transformed samples. Do transformed samples from the same class still share some visual similarities, or do they contain almost no human-recognizable information? What is the underlying significance of these transformations? I am curious if the authors could offer some more unique, high-level insights." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024refine,\ntitle={{REFINE}: Inversion-Free Backdoor Defense via Model Reprogramming},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4IYdCws9fc},\nnote={under review}\n}" }, "abstract": { "value": "Backdoor attacks on deep neural networks (DNNs) have emerged as a significant security threat, allowing adversaries to implant hidden malicious behaviors during the model training phase. Pre-processing-based defense, which is one of the most important defense paradigms, typically focuses on input transformations or backdoor trigger inversion (BTI) to deactivate or eliminate embedded backdoor triggers during the inference process. However, these methods suffer from inherent limitations: transformation-based defenses often struggle to balance the intensity of transformations with preserving the model's accuracy, while BTI-based defenses require accurate reconstruction of the trigger patterns, which is rarely achievable without prior knowledge. In this paper, we propose REFINE, an inversion-free backdoor defense method based on model reprogramming. REFINE consists of two key components: (1) an input transformation module that disrupts both benign and backdoor patterns, generating new benign features; and (2) an output remapping module that redefines the model's output domain to guide the input transformations effectively. By further integrating supervised contrastive loss, REFINE enhances the defense capabilities while maintaining model utility. Extensive experiments on various benchmark datasets demonstrate the effectiveness of our REFINE and its resistance to potential adaptive attacks." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Backdoor Defense", "Model Reprogramming", "Backdoor Attack", "AI Security" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/69b0fb1497a8c6ace495f0e0afe647cdf0926876.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/0971dc6452f2bca11056898fa40c9009bcec8d05.zip" }, "title": { "value": "REFINE: Inversion-Free Backdoor Defense via Model Reprogramming" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
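For context on the REFINE reviews above, which describe the method as a trainable input-transformation module plus an output-mapping module wrapped around a frozen model: the PyTorch sketch below is illustrative only and is not taken from the submission. It shows the general model-reprogramming pattern, with a small convolutional transform standing in for the paper's U-Net and a fixed label permutation standing in for the learned output mapping; the training losses (cross-entropy plus supervised contrastive) and all other specifics of REFINE are omitted, and every name here is an assumption for illustration.

import torch
import torch.nn as nn

class ReprogramWrapper(nn.Module):
    # A frozen classifier wrapped with a trainable input transform and a fixed output remapping.
    def __init__(self, frozen_model, num_classes):
        super().__init__()
        self.frozen = frozen_model
        for p in self.frozen.parameters():      # the (possibly backdoored) model is left untouched
            p.requires_grad_(False)
        self.transform = nn.Sequential(         # stand-in for a U-Net-style input transformation
            nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
            nn.Conv2d(16, 3, 3, padding=1), nn.Tanh(),
        )
        self.register_buffer("perm", torch.randperm(num_classes))  # hard-coded output mapping

    def forward(self, x):
        logits = self.frozen(self.transform(x))  # gradients reach only the transform's parameters
        return logits[:, self.perm]              # remap the output domain via the permutation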
4JBEpP6eRS
ZIP-FIT: Embedding-Free Data Selection via Compression-Based Alignment
main
Active
data centric machine learning;autoformalization;large language models;reasoning
neurosymbolic & hybrid AI systems (physics-informed, logic & formal reasoning, etc.)
3;3;6;8
4;3;4;4
1;2;3;4
2;2;3;4
3;3;3;4
5
3.75
2.5
2.75
3.25
0.544331
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "### Minor comments\n\nFigure 3, page 5: \nThe color bar is labeled \"Gzip Alignment\" instead of\n\"ZIP-FIT-Alignment\" from Algorithm 1; it may be confusing to readers.\n\nFigure 3, page 5, line 231: \nPlease mention also in the figure caption that the test loss is\ncalculated on ProofNet data." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "The paper is concise, sound, well written, and the experimental section shows promise for the method, especially with regard to other embedding-free methods.\n\nThe conceptual simplicity combined with the empirical results of the method is an especially strong point of the work." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a new data selection mechanism based on text\ncompression distances. The concept of using compression methods for\ndeep learning follows several modern practical results and theoretical\nmotivations that language modeling is fundamentally based in text\ncompression. The method's conceptual simplicity combined with strong\nempirical results make it stand out as a modern way for filtering for\naligned data." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Ideally, it would be shown how the size of $n$ (i.e., number of\nsamples from the target domain $p$) influences the performance of the\nmethod. If it is possible to pick $n$ just sufficiently large enough,\nit would greatly improve the computational efficiency of the method\nfor large target datasets.\n\nExperiments in other domains would be really nice to better\ndemonstrate the generalization capabilities of the method. Possibly\nthere is data that is not well-suited to compression and accordingly\nZIP-FIT, or where the data's compression factor varies too much\nbetween samples?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Could you provide further insights into how ZIP-FIT might perform with data that have higher variability and diverse syntactic structures, such as conversational datasets?\n2. 
Can you clarify the theoretical basis for using gzip compression over other compression methods that might exploit redundancy differently? Would alternative compression algorithms affect the performance of ZIP-FIT?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. ZIP-FIT’s embedding-free approach is a refreshing deviation from common embedding-based methods, offering a novel solution by leveraging gzip compression. The concept of using normalized compression distance (NCD) as an alignment metric is insightful and could inspire future research in embedding-free methodologies for various data selection tasks.\n2. The empirical results support the claims, showing that ZIP-FIT achieves faster convergence and better performance than established methods. The experiments were conducted on both AutoFormalization and code generation tasks, demonstrating ZIP-FIT's versatility across different domains.\n3. The paper is well-structured, with a clear exposition of the algorithm, experimental setup, and results. The figures effectively illustrate the performance benefits of ZIP-FIT.\n4. ZIP-FIT could represent a significant advancement in data selection for machine learning, particularly in computationally constrained environments. Its potential to optimize model fine-tuning with minimal resource requirements makes it highly applicable for real-world use cases, especially in domain-specific and low-resource applications." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces ZIP-FIT, an embedding-free data selection method leveraging gzip compression to measure the alignment between training and target domains. Unlike existing approaches that rely on neural embeddings, ZIP-FIT uses a computationally efficient compression-based alignment metric, enabling faster data selection while maintaining high relevance to the target task. Empirical evaluations demonstrate ZIP-FIT’s superiority over baselines DSIR and D4 in AutoFormalization and code generation tasks, achieving significantly faster convergence and lower cross-entropy loss with reduced computational costs. ZIP-FIT’s promise lies in its scalability and effectiveness, particularly in low-resource settings, where traditional embedding-based methods may be impractical." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. While ZIP-FIT achieves excellent results on the tasks tested, its reliance on gzip compression may limit its effectiveness in complex semantic domains where relationships are nuanced and less compressible. Embedding-free approaches, while efficient, may not be ideal for tasks that require deep semantic understanding or complex syntactic relationships." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "As specified in the \"Weaknesses\" section:\n- What is the score of the fine-tuned LLM using ZIP-FIT on benchmarks like HumanEval and PubMedQA compare to LLMs fine-tuned without using ZIP-FIT?\n- How does ZIP-FIT compare to prior method like https://arxiv.org/abs/2405.00705 in terms of both running time and final model score?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- This paper is well-presented and well-motivated.\n- Studying computation-efficient methods for data selection in LLM instruction fine-tuning is a promising research direction.\n- The proposed ZIP-FIT is intuitive and easy to follow.\n- The proposed approach bypasses the need for LLM forward computation to obtain embeddings, making it computationally efficient.\n- The presented experimental results seem promising." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes ZIP-FIT, an efficient, embedding-free method for selecting high-quality, domain-specific fine-tuning data for language models (LMs). Prior methods often rely on computationally expensive neural embeddings or classifiers to filter aligned datasets, while those based on N-gram similarity may lack the structural depth needed for complex tasks like code generation. In contrast, ZIP-FIT leverages gzip compression to evaluate data alignment with target domains, based on the idea that compression algorithms encode information in a way similar to neural networks. The ZIP-FIT approach eliminates the need for LM forward passes to obtain embeddings, making it efficient and particularly suitable for low-resource environments. Experimental results show that ZIP-FIT outperforms prior data selection methods, such as DSIR and D4, as measured by test loss." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- [Major] The proposed method seems very simple and straightforward; using a gzip-style method to embed data appears to be a relatively standard approach.\n- [Major] All experimental results are based on test loss, which may not be very reliable. It would be essential to conduct evaluations on some standard benchmarks, such as HumanEval and MBPP for code evaluation, to demonstrate the scores the model can achieve.\n- It is unclear how the proposed ZIP-FIT compares to prior, more complex data selection methods in terms of both running speed and final model quality (e.g., [1]), aside from deduplication approaches like D4.\n- [Minor] The paper seems to be written somewhat in rush, the figure quality of Figure 2 does not seem to be very high.\n\n[1] https://arxiv.org/abs/2405.00705" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "(1) Could the authors provide additional evidence to support the claim that gzip is effective in capturing syntactic and structural relationships in textual sequences?\n\n(2) Would the authors be able to demonstrate the effectiveness of their approach using evaluation metrics beyond cross-entropy test loss, and compare it to relevant baselines, such as those mentioned earlier?\n\n(3) Could you provide more insight into why D4 was excluded from the code generation experiments, and specifically how it affected model performance?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "(1) Problem Significance: The author tackles a crucial problem in low-resource settings, addressing the challenge of fine-tune data selection without relying on GPU-intensive and embedding-based methods. This is a highly relevant and impactful research direction.\n\n(2) Innovative Filtering Criterion: The authors' inspiration from gzip compression methods has led to the proposal of a novel and intriguing selection criterion. This approach is not only interesting but also demonstrates out-of-the-box thinking, making it a notable contribution to the field." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces an innovative, embedding-free data selection method for efficient fine-tuning of large language models. Drawing inspiration from gzip compression techniques, the authors propose utilizing Normalized Compression Distance as a metric to filter and prune fine-tuning datasets. The authors conduct a comparative analysis with prior embedding-free methods, originally designed for filtering pre-training datasets, on Autoformalization and Python coding tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "(1) **Inadequate Baselines**: The authors propose a data selection method for model alignment, but only compare it with prior works such as DSIR and D4, which were primarily designed for data selection during the pre-training phase. A more comprehensive literature review on data pruning methods for model alignment is lacking, including embedding-based methods [1], LLM model response metrics [2], Gradient-based metrics [3],Quality metrics judged by LLMs [4], inference loss on evaluation sets [5].\n\n(2) **Evaluation Metrics**: The authors primarily use test data cross-entropy loss as the evaluation metric, results are thus not surprising given that the data selection method uses the test data to anchor the selection criteria. However, the authors do not compare their results with widely accepted metrics in the research community for the studied downstream tasks, such as:\n\n(a). Autoformalization: proof success rates on miniF2F [6,7]\n\n(b). Python coding: functionality pass rates (pass@k on HumanEval) based on unit-tests [8,9]\n\n\n(3) **Clarifications on Motivation**: In Section 2.3, the authors argue that n-grams fail to capture syntactic or structural relationships within the data, while hypothesizing that gzip does. 
However, this hypothesis is not supported by theoretical or empirical evidence, weakening the motivation for the proposed approach. It is also not compared on if the proposed approach is better or worse than high-resource methods, such as embedding-based methods.\n\nReferences: \n\n[1] DEFT-UCS: Data Efficient Fine-Tuning for Pre-Trained Language Models via Unsupervised Core-Set Selection\n\n[2] From Quantity to Quality: Boosting LLM Performance with Self-Guided Data Selection for Instruction Tuning\n\n[3] LESS: Selecting Influential Data for Targeted Instruction Tuning\n\n[4] Alpagasus: Training a better alpaca with fewer data\n\n[5] Instruction Mining: Instruction Data Selection for Tuning Large Language Models\n\n[6] Autoformalization with Large Language Models\n\n[7] LEGO-Prover: Neural Theorem Proving with Growing Libraries\n\n[8] Evaluating Large Language Models Trained on Code\n\n[9] Is Your Code Generated by ChatGPT Really Correct? Rigorous Evaluation of Large Language Models for Code Generation" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "use gzip to select optimal data for code and autoformalization" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024zipfit,\ntitle={{ZIP}-{FIT}: Embedding-Free Data Selection via Compression-Based Alignment},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4JBEpP6eRS},\nnote={under review}\n}" }, "abstract": { "value": "Selecting high-quality, aligned fine-tuning data is crucial for improving the downstream performance of language models (LMs). Automatic data selection in these scenarios is challenging and often inefficient due to previous approaches relying on neural embeddings or limited n-gram representations to identify aligned datasets. In addition, traditional data selection methods often focus on increasing the size of the training data, making them computationally expensive to use and data inefficient. In this work, we introduce ZIP-FIT, an embedding-free, data-efficient selection framework that leverages gzip compression to measure the alignment between training data and target domains. We show that ZIP-FIT significantly outperforms two leading baselines, DSIR and D4, in selecting high-quality data for ProofNet, a formal mathematical dataset, and HumanEval, a benchmark for code generation tasks. Specifically, ZIP-FIT demonstrates a computational speed advantage, performing data selection up to 65.8\\% faster than DSIR and achieving its lowest cross-entropy loss up to 85.1\\% faster. Our findings suggest that ZIP-FIT offers a scalable and adaptable approach for data selection, enabling more precise fine-tuning for code generation domains. By demonstrating that embedding-free data selection can outperform established methods like DSIR and D4, our research opens new avenues for optimizing model training, thereby enhancing the effectiveness and efficiency of machine learning workflows." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "data centric machine learning", "autoformalization", "large language models", "reasoning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/a706bb5d7465dc4f64fab987f6d76be8adb5a408.pdf" }, "presentation": null, "primary_area": { "value": "neurosymbolic & hybrid AI systems (physics-informed, logic & formal reasoning, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "ZIP-FIT: Embedding-Free Data Selection via Compression-Based Alignment" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4JK2XMGUc8
Free Hunch: Denoiser Covariance Estimation for Diffusion Models Without Extra Costs
main
Active
diffusion model;conditional generation;inverse problems;denoiser covariance estimation
generative models
5;5;5;6;6
3;3;2;4;3
3;2;3;3;4
2;2;2;3;3
1;2;2;2;2
5.4
3
3
2.4
1.8
0.645497
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- The authors assume that the observation model p(y|x0) is linear gaussian. Is there any other real world problems can be expressed in this form? \n- Continue with weakness section: Can you provide more experimental results? \nFor example, does the performance changes when using more Heun steps? does the improvement also happen to other type of discretization methods, e.g. Euler? Also there are many other ODE samplers and SDE samplers, if you change the ODE samplers with SDE ones, does the introduced stochasticity make any difference when using your covariance estimate method?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Novel method, proposed a covariance estimate method via separately updating along time and position/space. Also provide a practical implementation for the proposed method. \n- The paper provides theoretical insights into why accurate covariance estimation is crucial for unbiased conditional generation, which is supported by experimental results on inverse problems." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors introduce a new covariance estimation method which makes use of covariance information in denoiser and the trajectory curvature without introducing significant additional compute. Moreover, to make the approach suitable for high-dimensional data (e.g. covariance matrices storage issue), the low-rank updates is proposed. The authors validate their approach on linear inverse problems, demonstrating its effectiveness compared to baselines under four metrics." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The experiment section is not as strong as methodology section: only one dataset and one ODE denoiser is tested under low NFE setting. The design of the experiment and the corresponding results analysis can be improved. \n- Only one ODE sampler (Heun) with steps=15, 30 is tested. While indeed results show the importance of accurate covariance estimate, it’s hard to conclude that this is applicable to all standard diffusion models." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. 
In the image data results, adding the online update steps to either identity covariance or the FH covariance does not always improve the performance. Is there any explanation or further investigation on why it could happen? \n2. Table 1 shows that in most cases, FH+online outperforms FH. But in Figure 7, only the denoised image from FH is demonstrated. Is there any vision difference between FH and FH+online in terms of the images in Figure 7?\n3. Notational /exposition issues:\n 1. In Eq. (1), the \"noising forward process\" should be defined in the joint distribution of the stochastic paths or a stochastic differential equation. Eq. (1) gives the margin distribution of the state at time t, which cannot formally \"define\" a process.\n 2. In Eqs. (2) and (3), $\\dot\\sigma$ and $\\omega_t$ are undefined. \n 3. Above Eq. (7), $p_{0|t}$ is undefined. \n 4. In Eq. (11), add a bracket to make the inverse clear. I.e., $[\\nabla_{\\mathbf{x}}^2\\log p(\\mathbf{x},t)]^{-1}$.\n 5. The last few sentences in Sec. 3.1. confused me. The integral $\\int p(\\mathbf{x}')\\mathcal N(\\mathbf{x}'\\mid \\mathbf{0}, \\sigma^2\\mathbf{I})d\\mathbf{x}'$ is not a convolution. \n 6. In Eq. (18), should the right-hand side be $\\left[\\mathbf{\\Sigma}_{0\\mid t}(\\mathbf{x}+\\Delta\\mathbf{x})\\right]\\Delta\\mathbf{x}$.\n 7. In Eq. (23), use either $\\nabla_{\\mathbf{x}}$ or $\\nabla_{\\mathbf{x_t}}$ consistently throughout the paper.\n 8. In Eqs. (23) and (24), $\\mathbf{A}$ is undefined.\n 9. Above Eqs. (25), $\\mu_{0\\mid t}$ should be in boldface. (and many other occurrences throughout the paper).\n 10. In Sec. 4.2., \"we noticed that the guidance scale is overestimated the larger the dimensionality is, leading to overconfidence.\" does not read well --- the authors probably left over some words from a previous edition. \n 11. Throughout the paper and the appendix, please only label the equations you refer to later." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "Originality and significance: The proposed method is relevant to the diffusion models and is novel in that it provides a fast and efficient algorithm for estimating a broader class of covariance structures. \n\nQuality and clarity: The paper clearly connects the proposed method to existing literature on similar problems." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors proposed a new methodological framework for estimating covariance in the generative methods of diffusion models. The estimation of the covariance in the reverse diffusion process is purely based on the existing samples through a two-step updating scheme. The examples show the proposed method outperforms other existing methods in denoising." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The exposition can be improved as the current manuscript contains many inconsistent notations, undefined variables, and many typos. I would say it is a lovely work with appealing results, but unfortunately, the manuscript should be significantly revised and proofread to be easier to follow." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Since the data covariance is estimated in the DCT basis where the covariance is approximately diagonal, have you considered training a model to directly estimate the denoiser covariance in this basis, or with a diagonal + low-rank representation?\n\n2. The numerical results in Table 1 seems to be worse than that in the DDNM paper, is this because the number of sampling steps is limited?\n\n3. What is the computational overhead for estimating the data covariance and doing the low-rank updates, compared to the cost of evaluating the neural network?\n\n4. What is the variance exploding case first mentioned in Line 213? This doesn't seem to be explicitly defined in the paper.\n\n5. What is the difference between $\\mu_{0\\mid t}(x)$ and $\\mu_{0 \\mid t}(x_t)$, as well as $\\sigma(t)$ and $\\sigma_t$?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. It is difficult to efficiently estimate the covariance of the denoiser due to its high-dimensionality, and methods that attempt to estimate it has have to use reduced-complexity representations. Using the DCT-diagonal basis to estimate the initial data covariance and a low-rank update inspired by quasi-Newton methods is a novel approach.\n\n2. Because this method does not require training an additional neural network, it can be applied to existing pre-trained diffusion models to be used for solving inverse problems.\n\n3. The experimental results seem promising and improves on previous methods on both qualitative examples and quantitative metrics." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a new method to estimate the covariance during the denoising process in diffusion models. The key idea is using Tweedie's formula to relate the covariance of the denoiser with the Hessian of $\\log(p(x_t))$, then using the gradient of $\\log(p(x_t))$ (i.e. the score function) evaluated during each denoising step to estimate this Hessian. Starting with an estimate of the covariance of the data distribution, the algorithm performs low-rank updates to this covariance matrix, inspired by quasi-Newton methods, using the score function estimates given by the neural network. This method is then applied to fast sampling for solving inverse problems, showing improvements over other methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. There needs to be more theoretical or empirical justification for some of the algorithmic choices made, such as the choice of quasi-Newton method or the DCT basis used to approximate the data covariance.\n\n2. 
The notation in the paper is inconsistent and the definitions of some quantities are not clear; for example, are $p(x, t)$, $p(x_t)$, and $p_t(x)$ referring to the same quantity? Also, it is not clear what exactly the full algorithms used for solving linear inverse problems are; Algorithms 1 and 2 in the paper only update the covariance estimate.\n\n3. The experimental improvements for solving linear inverse problems seem to be limited to the few-sample-step regime. It is unclear if this covariance estimation method can be helpful in other applications such as unconditional sampling, or with non-linear guidance terms." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Refer to weaknesses.\n2. In Figure 6, what is the setting of y: an inpainting transform or deblurring? Is this guidance meaningful across different y|x_0 and datasets?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. It is novel and reasonable to design a low-dimensional expression and update rules for covariance approximation.\n2. There are abundant math derivations to support the method." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a new framework for estimating covariance in diffusion models, addressing issues in current methods such as high test-time computation and heavy approximations. The proposed method utilizes readily available covariance information from training data and the curvature of the generative trajectory. The authors also present a method for transferring covariance estimates across noise levels." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. To handle linear inverse problems using diffusion models, there are many classes of methods [1]. The experiments in this work only compare against some of them; the authors should at least mention why the others were left out of the comparison.\n2. There is a lack of an experiment to back up whether the low-dimensional covariance of the proposed method can indeed match the underlying true one. I understand this cannot be done in the high-dimensional case; maybe a 2D or 10D case would suffice.\n3. There are other applications of diffusion models that would need the covariance, such as causal reasoning [2], likelihood evaluation [3][4] and adjoint guidance [5]; maybe you could add a discussion of these.\n\n\n[1] Daras, Giannis, et al. \"A survey on diffusion models for inverse problems.\" arXiv preprint arXiv:2410.00083 (2024).\n\n[2] Sanchez P, Liu X, O'Neil A Q, et al. Diffusion models for causal discovery via topological ordering[J]. arXiv preprint arXiv:2210.06201, 2022.\n\n[3] Lu C, Zheng K, Bao F, et al. Maximum likelihood training for score-based diffusion odes by high order denoising score matching[C]//International Conference on Machine Learning. 
PMLR, 2022: 14429-14460.\n\n[4] Anonymous. Gradient-Free Analytical Fisher Information of Diffused Distributions.\n\n[5] Song K, Lai H. Fisher Information Improved Training-Free Conditional Diffusion Model[J]. arXiv preprint arXiv:2404.18252, 2024." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "see Weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. It is novel to leverage an L-BFGS-style approach to approximate the covariance in a low-dimensional manner in DMs.\n2. The mathematical derivation is rigorous, though it is quite similar to the math of BFGS optimization.\n3. The choice of the data covariance as the initial covariance is reasonable." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces an L-BFGS-like method to maintain the covariance information of the denoiser along the inference path to better solve linear inverse problems via diffusion models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The presentation of the paper could be improved for clarity. The term 'conditional generation' is often associated with 'text-guided' or 'label-guided' generation. However, in this paper, it refers to conditions based on partially corrupted samples. While this interpretation is not incorrect and indeed falls under the classifier-guided framework, it might be confusing for readers at first glance. \n\n2. There are typographical errors present in the paper, for instance, in equation (98). These need to be corrected for accuracy and better understanding.\n\n3. The paper lacks a theoretical bound on the approximation error of the covariance using the proposed method. Both Equation 6 and the low-rank approximation involve approximation errors." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a new, efficient method for denoiser covariance estimation in diffusion models, which can be used for conditional generation and inverse problems" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024free,\ntitle={Free Hunch: Denoiser Covariance Estimation for Diffusion Models Without Extra Costs},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4JK2XMGUc8},\nnote={under review}\n}" }, "abstract": { "value": "The covariance for clean data given a noisy observation is an important quantity in many conditional generation methods for diffusion models. Current methods require heavy test-time computation, altering the standard diffusion training process or denoiser architecture, or making heavy approximations. 
We propose a new framework that sidesteps these issues by using covariance information that is available for free from training data and the curvature of the generative trajectory, which is linked to the covariance through the second-order Tweedie's formula. We integrate these sources of information using (i) a novel method to transfer covariance estimates across noise levels and (ii) low-rank updates in a given noise level. We validate the method on linear inverse problems, where it outperforms recent baselines, especially with fewer diffusion steps." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "diffusion model", "conditional generation", "inverse problems", "denoiser covariance estimation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/ee3b68d8cb17efc6aa3b1d0a5d36edc4e6fc7585.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/5c042e4d069a5c3eaccb3fadad08ed86db001629.zip" }, "title": { "value": "Free Hunch: Denoiser Covariance Estimation for Diffusion Models Without Extra Costs" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4JZ56UVJYf
CocoRNA: Collective RNA Design with Cooperative Multi-agent Reinforcement Learning
main
Active
Multi-agent;reinforcement learning;RNA design
reinforcement learning
3;3;5;5
3;4;3;3
2;2;3;2
3;3;2;2
2;3;3;3
4
3.25
2.25
2.5
2.75
-0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Why the proposed method is only applied to RNA design? Can the method be extended to any other biological sequence (DNA, protein, peptide) design? If yes, why restrict its applicability/generalization?\n\n2. As also stated in the first weakness, what is the main source of benefit of using an RL method, instead of performing collective design for combinatorial optimization as in [1]?\n\n3. How does CocoRNA deal with sparse and/or delayed rewards?\n\n4. How does CocoRNA generalize across different target structures?\n\n5. How is SAER related to/inspired by HER? How the goals are sampled in SAER? How do you gradually reduce the SAER operation, this is not clear in the paper.\n\n6. Can you provide some insight/discussion on how embedding SAER into the training process would not yield a suboptimal solution?\n\n7. In Section 5.3 (and Figure 4), the two ablation studies are done on which dataset? Or is it averaged over replications or datasets-- what do error bars represent? It could have been clarified in the figure caption." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The paper presents an efficient RNA secondary structure design method using a collective design approach, with empirical evaluations provided on two RNA design benchmark datasets.\n- The paper demonstrates the potential of cooperative MARL approaches to RNA design tasks.\n- The proposed approach addresses a real-world problem, validated through experiments on real-world datasets." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper addresses the challenge of efficient and scalable RNA secondary structure design. Designing RNA sequences that reliably fold into specified structures is difficult due to the complexity of the combinatorial search space. The paper proposes a collective RNA design approach called CocoRNA which uses cooperative multi-agent reinforcement learning. CocoRNA designs RNA sequences by decomposing the design task into subtasks assigned to multiple agents." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The main contribution of the proposed method is presented as designing RNA secondary structure as a \"collective design\" problem. However, the contribution is not novel as the recent work [1] already introduces such a collective design idea, that is, efficiently designing biological sequences using cooperative design framed as a cooperative game between players (here it is called agents), and [1] should be cited in the relevant work section. 
Based on this, it should be further clarified what the main source of benefit of using a MARL method is, instead of performing collective design directly using combinatorial optimization as in [1]. What are the key differences and advantages of your method over [1]?\n\n2. The paper lacks a discussion or a theoretical analysis of the convergence of the distributed policies to the global optimum. In the abstract and also in lines 126-129, it is stated that CocoRNA enables such convergence; however, there is no guarantee or analysis that supports this claim. I would suggest that either a theoretical analysis or some clear discussion/intuition should be provided in the paper or the claims should be modified.\n\n3. It would support the empirical performance of CocoRNA better if an analysis of the performance of CocoRNA under sparse and delayed rewards is presented. Instead of using the reward function in equation (12), how would the performance change under sparse reward (e.g., $R_t$ = {$0$ if $H_t > 0$; $C$ otherwise})?\n\n4. While motivating CocoRNA in the Introduction, the authors state that RL-based methods do not exploit learning to generalize across different target structures (line 62). CocoRNA is stated to mitigate problems of (1) the curse of dimensionality and (2) sparse rewards (although not supported enough for (2)). However, the paper does not present how CocoRNA generalizes across (3) different target structures.\n\n5. I think the related work section (2.1) should be restructured. There should be a section for RL-based methods that are used to design biological sequences such as [4,5]. These are potential SOTA baselines for CocoRNA. In addition, a discussion on why MARL is needed over these RL methods would clarify the contribution within the RL & biological sequence design context. Instead of providing general MARL works in detail, this section would have provided the flow from RL to MARL within the problem context.\n\n6. An ablation study on the grouping of players would be helpful in showing the effectiveness of CocoRNA regarding the interdependence of nucleotides at different positions. The paper presents only an agent-per-position (n agents) analysis. How does the performance of CocoRNA change with respect to the agent size?\n\n7. Different from the point above, an ablation study considering a decomposition scheme such as position + structure assigned to an agent would have shown the proposed method's flexibility regarding decomposition choices. This is a more specific decomposition than only structure-based decomposition.\n\n8. Regarding Section 4.4, in Hindsight Experience Replay (HER), additional goals are used to store additional episodes in the replay buffer to deal with sparse reward environments. Hence, the goal influences actions, but not the environment dynamics. In Search-augmented Experience Replay (SAER), how are the goals defined? How are additional goals for the replay sampled? It is hard to relate SAER to HER without clearer explanations. I think that to better motivate CocoRNA's sample efficiency and to build a better connection with HER, SAER should have been evaluated on a sparse reward environment.\n\n9. The empirical evaluation is done against a limited number (and type) of baselines. The proposed method is compared only against RL-based methods. 
The paper only cites antaRNA [1] and MCTS-RNA [2] approaches, however, these search-based fundamental approaches should be included as baselines; which would support CocoRNA's performance against a diverse set of baselines. Furthermore, another valid baseline from literature that combines RL with local search [3] could have been considered.\n\n10. It would have supported the generalizability of the proposed approach better if an empirical analysis on any other biological sequence design domain such as protein design (which has some benchmark datasets e.g. GB1) was presented.\n\n11. For a thorough analysis, training performance could have been provided. Further, an ablation over the trained policy (e.g. under different training steps or hyperparameters) could have demonstrated the effectiveness of CocoRNA under various training settings.\n\n12. There is no clear discussion on the limitations and potential drawbacks of the method.\n\n**Minor:**\n\n- In line 114, the reference (Eastman et al…) is doubled. \\cite{} should be corrected to avoid repetition.\n\n- Regarding notations, instead of using the notation k for several places, another symbol could be used. Specifically, it is used both in equation (5) to denote the subsequence of nucleotides and in equations (7), (9) to denote cumulative future rewards.\n\n> [1] Bal, M. I., Sessa, P. G., Mutny, M., & Krause, A. (2023). Optimistic Games for Combinatorial Bayesian Optimization with Applications to Protein Design. NeurIPS 2023 Workshop on Adaptive Experimental Design and Active Learning in the Real World. https://openreview.net/forum?id=ScOvmGz4xH (or alternatively: http://arxiv.org/abs/2409.18582)\n\n> [2] Kleinkauf, R., Houwaart, T., Backofen, R., & Mann, M. (2015). antaRNA–Multi-objective inverse folding of pseudoknot RNA using ant-colony optimization. BMC bioinformatics, 16, 1-7.\n\n> [3] Yang, X., Yoshizoe, K., Taneda, A., & Tsuda, K. (2017). RNA inverse folding using Monte Carlo tree search. BMC bioinformatics, 18, 1-12.\n\n> [4] Eastman, P., Shi, J., Ramsundar, B., & Pande, V. S. (2018). Solving the RNA design problem with reinforcement learning. PLoS computational biology, 14(6), e1006176.\n\t\n> [5] Angermueller, C., Dohan, D., Belanger, D., Deshpande, R., Murphy, K., & Colwell, L. (2019). Model-based reinforcement learning for biological sequence design. In International conference on learning representations. (ICLR)\n\n> [6] Feng, L., Nouri, P., Muni, A., Bengio, Y., & Bacon, P. L. (2022). Designing biological sequences via meta-reinforcement learning and bayesian optimization. arXiv preprint arXiv:2209.06259." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1.\tIt is not clear to the reviewer what is the output of the actor network. 
Is it the nucleotide type for 1 position or for all positions associated with that agent?\n2.\tIs code available for the proposed algorithm at an anonymous link?\n3.\tThe reviewer suggests adding to the manuscript the part from Appendix A highlighting the difference in how the experience replay is applied.\n4.\tThe authors mention two possible decompositions: (i) position-based; (ii) structure-based. In the experiments section, it seems that only position-based is mentioned. Are structure-based decomposition results also shown in the manuscript?\n5.\tThe proposed method is compared with other RL-based baselines. It would be interesting to compare the proposed method to the other generative-based model baseline (Patil et al., 2024), with a small comparison regarding success rates and a discussion of which sequences can't be predicted by the generative-based model baseline because of their sequence length.\n6.\tFrom the reviewer's understanding, with n=4, four individual policies are trained. In this scenario, would it not make sense to use four shared policies during training (with shared weights)? It would be interesting to also have this ablation study.\n7.\tFor the experiments, a random split of the datasets was performed. Similarly to protein-related tasks, a split based on Hamming distances of structures to check the generalization capabilities of the policies would be desired in the reviewer’s opinion.\n8.\tIt would be interesting to discuss the trade-offs between the proposed methodology and other generative-based alternatives such as graph neural networks using graph-based representations. As other parts of the structure might also affect the design of the sequence, having partial observations might not give enough information even with a centralized critic. For this, using a GNN architecture for decoding or a GNN-based policy for the RL methodology could alleviate this issue." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1.\tA novel and interesting MARL-based methodology is proposed for RNA secondary structure design.\n2.\tA search-augmented experience replay technique is proposed, inspired by HER.\n3.\tThe use of multiple policies alleviates sample efficiency issues and, given enough computational resources, also improves computational efficiency." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper tackles the RNA secondary structure design problem by proposing a novel approach using cooperative multi-agent RL. Multiple policies are jointly trained to design the sequence for parts of the RNA structure, and a centralized critic is used to maximize the reward of the entire final sequence. The authors show that this methodology improves sample efficiency and computational efficiency while improving over other traditional RL-based baselines." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.\tThe framework is still model-based and relies on repeatedly applying folding algorithms for reward calculation.\n2.\tUsing the position-based decomposition, it is hard to justify having different local policies and not a shared policy. 
At least an ablation seems needed for that.\n3.\tGiven the current manuscript, it is hard for readers to reproduce it, code is not available, there are some details that need additional information, and the random dataset splits might leak data." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "please see weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1) **Significance of the Problem**: The paper tackles the complex challenge of RNA secondary structure design, which holds significant implications in fields like synthetic biology and gene regulation. By addressing the combinatorial nature of RNA design and the need for efficient, scalable methods, the authors provide a contribution to computational biology.\n\n2) **Clear and Well-Structured Presentation**: The paper is well-organized, with each section logically progressing from the problem statement to methodology and experimental evaluation. The clear exposition of the multi-agent reinforcement learning framework, algorithmic details, and ablation studies makes it easy to understand both the innovation and the practical execution of the proposed approach.\n\n3) **Robust Experimental Results**: The experimental results presented on the Rfam and Eterna100-v2 datasets are comprehensive and demonstrate COCORNA’s superior performance over existing methods in terms of success rate and design time. The ablation studies further substantiate the model's robustness, providing evidence that each component of the method contributes to the overall improvement.\n\n4) **Intuitive and Effective Approach**: The proposed multi-agent reinforcement learning framework is well-suited to the complex, distributed nature of RNA design tasks. The decomposition of the design problem and the centralized training with decentralized execution approach provide a practical and computationally feasible solution. The inclusion of the SAER method to improve initial data quality and learning efficiency is a thoughtful addition that strengthens the model’s effectiveness in this challenging domain." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a new approach for RNA secondary structure design by leveraging cooperative MARL. The proposed method, COCORNA, breaks down the RNA design task into multiple sub-tasks managed by individual agents. Through a centralized Critic and decentralized Actor architecture, COCORNA enables these agents to cooperate, aiming to address the combinatorial complexity of RNA sequence folding.\n\nThe model was trained on RNA design tasks using a novel Search-Augmented Experience Replay (SAER) mechanism, which improves initial learning efficiency. 
Experimental results on the Rfam and Eterna100-v2 datasets demonstrate that COCORNA outperforms existing methods in both design time and success rate, highlighting its potential for efficient and scalable RNA sequence design without further task-specific optimization. This study showcases COCORNA as a promising tool for addressing complex biological sequence design challenges through MARL." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1) **Lack of Visual Aids for Method Explanation**: The paper lacks visual illustrations of the proposed method, which is a drawback given the complexity of multi-agent reinforcement learning and RNA secondary structure design. Effective diagrams and flowcharts could have greatly enhanced the readability and accessibility of the methodology, particularly for readers unfamiliar with MARL frameworks in biological sequence design. Including such visuals would improve the reader's ability to grasp the significance and innovative aspects of this work.\n\n2) **Limited Novelty**: A significant concern lies in the limited novelty of the approach. The paper formulates RNA design as an MARL problem and primarily applies established MARL methods to solve it. The novelty of specific components, such as the reward function and the Search-Augmented Experience Replay (SAER) module, also seems limited. It would strengthen the work if the authors provided more innovative, task-specific adaptations or insights that build on existing methods in a unique way.\n\n3) **Need for Deeper Conceptual Insights**: The paper would benefit from more in-depth conceptual insights into the unique challenges and opportunities specific to RNA design in the context of MARL. For instance, an analysis of how different decomposition approaches (e.g., position-based vs. structure-type-based) impact learning, or a discussion on task-specific challenges in RNA sequence alignment, would offer valuable perspectives. Such insights could highlight the authors’ deep understanding of the problem and provide a stronger foundation for the applicability and potential extensions of COCORNA." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See the weaknesses above." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The authors efficiently decompose the complex RNA design problem into multiple sub-tasks. These tasks are allocated to cooperative agents to solve collaboratively. They introduce a search-augmented experience replay method to improve learning efficiency, which improves the efficiency of RNA design. The proposed method significantly outperforms existing methods in terms of both design time and success rate."
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a novel collective RNA design method based on cooperative MARL to solve the RNA secondary structure design problem. Empirical results demonstrate the outperformance of the proposed method." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. This paper is more like an application of MARL in RNA design. The contributions to MARL method to solve the specific issues when applying MARL in RNA design should be stated clearly.\n2. Besides the design of observations and reward functions, the authors should provide more explanations on how the agents cooperatively to sovle the RNA design task. \n3. The authors do not discuss the limitations of the proposed method." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose CocoRNA, a cooperative multi-agent reinforcement learning framework for RNA secondary structure design, which achieves better solving efficiency and higher success rates." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024cocorna,\ntitle={Coco{RNA}: Collective {RNA} Design with Cooperative Multi-agent Reinforcement Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4JZ56UVJYf},\nnote={under review}\n}" }, "abstract": { "value": "Ribonucleic acid (RNA) plays a crucial role in various biological functions, and designing sequences that reliably fold into specified structures remains a significant challenge in computational biology. Existing methods often struggle with efficiency and scalability, as they require extensive search or optimization to tackle this complex combinatorial problem. In this paper, we propose CocoRNA, a collective RNA design method using cooperative multi-agent reinforcement learning, for the RNA secondary structure design problem. CocoRNA decomposes the RNA design task into multiple sub-tasks, which are assigned to multiple agents to solve collaboratively, alleviating the challenges of the curse of dimensionality as well as the issues of sparse and delayed rewards. By employing a centralized Critic network and leveraging global information during training, we promote cooperation among agents, enabling the distributed policies to converge toward the global optimum and resulting in a high-quality collective RNA design policy. The trained model is capable of completing RNA secondary structure design with less time and fewer steps, without requiring further training or search on new tasks. We evaluate CocoRNA on the Rfam dataset and the Eterna100 benchmark. Experimental results demonstrate that CocoRNA outperforms existing algorithms in terms of design time and success rate, highlighting its practicality and effectiveness." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Multi-agent", "reinforcement learning", "RNA design" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/698348cff75e900a5ee741095e8c02700001e3c7.pdf" }, "presentation": null, "primary_area": { "value": "reinforcement learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "CocoRNA: Collective RNA Design with Cooperative Multi-agent Reinforcement Learning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4JfFW7d1gu
Can We Further Elicit Reasoning in LLMs? Critic-Guided Planning with Retrieval-Augmentation for Solving Challenging Tasks
main
Active
reasoning;planning;retrieval-augmented generation
applications to computer vision, audio, language, and other modalities
3;3;3;6
3;4;4;3
2;1;3;2
3;1;2;2
3;1;3;3
3.75
3.5
2
2
2.5
-0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "N/A" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* The paper is clearly written and easy to follow, with a well-articulated description of the proposed approach. The visualizations are also clear and supportive of the narrative.\n* The proposed method demonstrates superior performance compared to baseline methods." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces CR-Planner, a method combining critic-guided planning with retrieval augmentation to tackle reasoning-heavy tasks. The primary contribution of this work is the use of fine-tuned critic models to assist in action selection during inference, with training data for the critic models derived from offline Monte Carlo Tree Search (MCTS) data collection. Experimental results indicate that the proposed method outperforms baseline approaches." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* While the performance improvement over baselines is evident, it is not entirely surprising. The critic essentially distills the MCTS policy from GPT-4 and is expected to perform better than the original policy, given the presence of a reliable external verifier. Moreover, the comparison between CR-Planner and other baselines may not be fair, as CR-Planner likely incurs significantly higher computational costs due to the extensive computations required for MCTS offline data collection.\n* The training prompts for the critic model appear to be identical to the test tasks, which raises concerns about the generalizability of the learned critic. It would be beneficial to demonstrate the performance of the learned critic on different datasets to establish its ability to generalize beyond the training data. Otherwise, the model may merely approximate the offline MCTS performance.\n* The authors should also include the performance metrics of the vanilla MCTS policy to provide a baseline comparison. This would help quantify the extent of performance degradation when using the learned neural approximator (i.e., the critic).\n* To this matter, the technical contributions presented in this work seem to be limited.\n\nOverall Assessment:\nWhile the paper presents a well-written and clear description of CR-Planner and shows promising performance improvements, the evaluation lacks important details regarding computational fairness and generalizability, as well as its technical contribution limitation. Consequently, I recommend rejection to this submission." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "uestion 1: Although decomposing the planning process into three stages—Reason, GenQuery, and Retrieve—seems intuitively reasonable, it appears that there are no experimental results provided to validate this approach. For example, is there a need to fine-tune an additional critic for evaluating and executing the sampling answers for the \"Standard\" method in baselines? Can the authors provide such results and compare them with those of the CR-Planner?\n\nQuestion 2: Is there indeed a necessity to fine-tune a critic for Sub-Goal Selection? It seems that there is a certain temporal relationship among the stages of Reason, GenQuery, and Retrieve: the system must first reason, then generate a query based on the reasoning result, and finally retrieve the result based on the generated query. Are there scenarios where this temporal sequence does not apply?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "Strength 1: The paper aims to solve challenging reasoning problems, such as those found in programming and theorem-driven mathematical reasoning, which is a meaningful endeavor. Additionally, the experimental performance that exceeds baselines by an average of 10.06% is striking.\nStrength 2: The motivation to use fine-tuned critics for evaluating planning answers is intuitively reasonable. Furthermore, identifying the source of planning errors—whether in reasoning or retrieving—and proposing an approach that breaks down the planning process into Reason, GenQuery, and Retrieve is both impressive and rational.\nStrength 3: Overall, the article is well-written, with clear articulation and methodological figures that effectively convey the content." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a novel planning framework called Critic-guided planning with Retrieval-augmentation (CR-Planner). Due to frequent reasoning errors and irrelevant knowledge retrieval in planning, existing LLM-based methods that leverage techniques like Chain-of-Thought (CoT) and Retrieval-Augmented Generation (RAG) often struggle with reasoning-heavy and domain-knowledge-intensive tasks, such as competitive programming and mathematics. To address this, CR-Planner fine-tunes critic models using data collected from Monte Carlo Tree Search (MCTS) to evaluate and execute answers at each stage of planning, thereby enhancing the reasoning capabilities of planning systems. Experiments conducted on challenging tasks that require domain knowledge and reasoning, including competitive programming, theorem-driven mathematical reasoning, and complex domain retrieval problems, demonstrate that CR-Planner outperforms the baseline by an average of 10.06%." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Weakness 1: The training data used for fine-tuning the Critic is collected via MCTS, which imposes a heavy computational load. Moreover, the nodes generated during MCTS are sampled from LLM. Consequently, the process of labeling data by MCTS and LLM is costly.\nWeakness 2: Intuitively, there is a temporal relationship among the three sub-goals: Reason, GenQuery, and Retrieve. The system must first engage in reasoning, then generate a query based on the reasoning result, and finally retrieve the result based on the generated query. Therefore, the idea of fine-tuning a critic for sub-goal selection seems somewhat unnecessary." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. Could the authors clarify the volume and scope of data required for effective critic model training?\n - Is the data collection process designed to be online or offline?\n - Are there distinct types of data required for training each critic model?\n - To what extent is the trained critic model generalizable across different tasks or domains?\n\n2. The paper (lines 149-151) notes that MCTS is used to gather training data that captures global reward information. However, in large language models (LLMs) where the action space is effectively infinite, selecting optimal actions solely based on a value function may be impractical, as an optimal action cannot be deduced simply by comparing all possibilities.\n - How do the authors address the challenges posed by this expansive action space?\n - Given that value functions alone may not guide optimal action selection in LLM settings, what alternative strategies or adaptations are employed?\n\n3. The paper mentions temperature sampling for the REASON and GENQUERY actions and top-k candidates for the RETRIEVE action, yet it is unclear how these discrete and continuous spaces are integrated.\n - Could the authors provide a clearer description of the combined action space used in execution selection, beyond the reference to Appendix C and Table 6?\n\n4. In Equation 1, the policy selects actions based on the reward function rather than the value or action-value function, which may not yield the optimal trajectory.\n - Could the authors explain the rationale for using the reward function over other potential selection criteria that might better optimize the full trajectory?\n\n5. The section \"Collecting data via MCTS\" outlines MCTS with some modified definitions, yet lacks specifics.\n - How does the approach balance exploration within unknown search spaces?\n - Is data collected in a single batch or in increments, and what is the distribution among different data types collected?\n\n6. 
The experiment section does not address other relevant work on MCTS in reasoning contexts.\n - Could the authors discuss their approach in relation to similar works, such as:\n - *Q*: Improving Multi-step Reasoning for LLMs with Deliberative Planning. http://arxiv.org/abs/2406.14283\n - Quiet-STaR: Language Models Can Teach Themselves to Think Before Speaking. http://arxiv.org/abs/2403.09629\n - Monte Carlo Tree Search Boosts Reasoning via Iterative Preference Learning. http://arxiv.org/abs/2405.00451" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "1. The paper evaluates CR-Planner on diverse, challenging domains, demonstrating substantial improvements in both reasoning-intensive and retrieval-intensive tasks.\n2. By utilizing MCTS for data generation, CR-Planner effectively addresses data scarcity in training critic models, which is a significant practical contribution." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents CR-Planner (Critic-guided Planning with Retrieval-Augmentation), a framework designed to enhance reasoning and factual correctness in large language models (LLMs) by employing critic models for sub-goal selection and execution guidance. Traditional methods like chain-of-thought (CoT) reasoning and retrieval-augmented generation (RAG) have proven effective in structured reasoning tasks, but struggle with complex challenges in areas such as competitive programming and theorem-driven mathematics. CR-Planner addresses these challenges by using fine-tuned critic models to guide reasoning and retrieval, enabling a systematic selection and execution of sub-goals. Monte Carlo Tree Search (MCTS) is utilized to collect training data. Experiments are conducted across competitive programming, math problems, and domain retrieval." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. A key concern with the proposed approach is the soundness of using a critic model for action selection. While the critic model is trained to optimize the reward function, this function alone does not constitute an actionable policy, potentially limiting the framework's effectiveness. This limitation is further complicated by the mixed action space, which combines both continuous and discrete elements in execution.\n\n2. The paper would benefit from substantial revisions to provide clearer explanations of each component. In particular, detailed descriptions are needed for the problem formulation, the critic model’s training methods, and the use of pairwise ranking loss, along with a comprehensive equation and explanation. Additionally, the data collection process for each critic model requires clarification to enhance transparency and reproducibility.\n\n3. The novelty of the proposed approach is limited, as the use of MCTS for reasoning is well-established in existing literature. The lack of comparisons with related works on MCTS-based reasoning approaches makes it difficult to assess the unique contributions of this paper within the field."
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "What were the domain-specific tasks used for training the critics?\n\nGiven the need for training domain-specific critics, why not simply fine-tune a domain-specific generator? Perhaps the explicit planning, following the structure of CR-Planner gives a bigger performance boost, but empirically verifying this is important.\n\nWhat was used to motivate the separate fine-tuning of four domain-specific critics? Did you try creating one dataset for all of the critic tasks and fine-tuning a single critic? Results on this would be useful." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The use of stepwise critics seem to address an important problem: guiding the generation of long plans and reasoning traces without compounding errors or other degenerative behaviour. This is especially challenging given the inclusion of RAG, which can help simultaneously address knowledge-intensive aspects of tasks.\n\nThe method is thoroughly described and overall very well-communicated.\n\nThe results show performance gains over relevant benchmarks, although it is unclear whether the comparisons are fair given domain-specific fine-tuning budgets (see 'Weaknesses')." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper structures response planning for reasoning-intensive tasks as a series of reasoning steps and knowledge retrieval steps. To guide this complex plan formation, reasoning, query generation and retrieval critics are used to score the execution of sub-goals. A separate sub-goal selection critic is used for informing sub-goal selection. These critics are obtained by fine-tuning an open-source reward model on data collected via MCTS. Evaluations are performed on a competitive programming benchmark, a theorem-driven math benchmark and two reasoning-heavy domain retrieval benchmarks. Further analysis and comparisons are performed on the programming benchmark." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Nowhere is it stated what tasks are used for training the critic models. I can therefore only assume that training subsets of the benchmarks used for evaluation are used for training the critics. If so, this makes comparison to the current baselines unfair. \n\nThe reliance on domain-specific fine-tuning of several critic models is a notable limitation, especially given that the paper does not compare to simply fine-tuning the generating model with the same budget.\n\nThere is no ablation of the individual critics. The paper shows the importance of fine-tuned, domain-specific critics, but does not ablate the inclusion of a critic for each of the sub-goal execution types and sub-goal selection. 
\n\nThere are no ablations or experiments showing the impact, in terms of sample efficiency, of MCTS on critic fine-tuning." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We present critic-guided planning with retrieval-augmentation (CR-Planner), a novel framework that leverages fine-tuned critic models to guide both reasoning and retrieval processes through planning." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024can,\ntitle={Can We Further Elicit Reasoning in {LLM}s? Critic-Guided Planning with Retrieval-Augmentation for Solving Challenging Tasks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4JfFW7d1gu},\nnote={under review}\n}" }, "abstract": { "value": "State-of-the-art large language models (LLMs) exhibit impressive problem-solving capabilities but may struggle with complex reasoning and factual correctness. Existing methods harness the strengths of chain-of-thought (CoT) and retrieval-augmented generation (RAG) to decompose a complex problem into simpler steps and apply retrieval to improve factual correctness. These methods work well on straightforward reasoning tasks but often falter on challenging tasks such as competitive programming and mathematics, due to frequent reasoning errors and irrelevant knowledge retrieval. To address this, we introduce Critic-guided planning with Retrieval-augmentation, CR-Planner, a novel framework that leverages fine-tuned critic models to guide both reasoning and retrieval processes through planning.\nCR-Planner solves a problem by iteratively selecting and executing sub-goals. Initially, it identifies the most promising sub-goal from reasoning, query generation, and retrieval, guided by rewards given by a critic model named sub-goal critic. It then executes this sub-goal through sampling and selecting the optimal output based on evaluations from another critic model named execution critic.\nThis iterative process, informed by retrieved information and critic models, enables CR-Planner to effectively navigate the solution space towards the final answer.\nWe employ Monte Carlo Tree Search (MCTS) to collect the data for training the critic models, allowing for a systematic exploration of action sequences and their long-term impacts.\nWe validate CR-Planner on challenging domain-knowledge-intensive and reasoning-heavy tasks, including competitive programming, theorem-driven math reasoning, and complex domain retrieval problems. Our experiments demonstrate that CR-Planner significantly outperforms baselines, highlighting its effectiveness in addressing challenging problems by improving both reasoning and retrieval." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "reasoning", "planning", "retrieval-augmented generation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/32a92a4c5644af406f32a31b8cb65fb2d468d85d.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Can We Further Elicit Reasoning in LLMs? Critic-Guided Planning with Retrieval-Augmentation for Solving Challenging Tasks" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4KKqHIb4iG
Backpropagation-free training of neural PDE solvers for time-dependent problems
main
Active
neural PDE solvers;time-dependent partial differential equations;random feature networks;backpropagation-free training
learning on time series and dynamical systems
3;3;5;5;6
4;4;4;5;2
2;1;3;3;3
2;2;3;2;3
2;3;3;3;3
4.4
3.8
2.4
2.4
2.8
-0.442269
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "I hope to confirm with the authors that if you claim supremacy in any metric of the proposed method compared to traditional FEM solvers?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "- The writing is clear and detailed.\n- The experiments are rich in problem types, specific difficulties, and baseline comparisons." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors propose training neural PDE solvers by variable separation and random sampling of neural network weights. The neural network ansatz is utilized for the spatial domain, and the system evolving in time is solved by classical ODE solvers. Extreme learning machines and adaptive sampling techniques (SWIM) are applied for better training efficiency. An SVD layer is introduced to improve the condition number of the associated ODE. It is claimed that the proposed method outperforms PINN by 1 to 5 orders of magnitude in time efficiency and accuracy, for PDEs including Advection, Euler-Bernoulli, Nonlinear diffusion, and Burgers'." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Meaning no offense, but I think researchers in AI4PDE with more AI background will think of this work as a huge step backward. The essence of deep neural networks is their surprisingly good performance in approximating high-dimensional functions, and the efficiency of backpropagation in implementing neural networks with huge amounts of parameters. Surely there are still issues even if we can obtain the gradients cheaply, but zeroth-order optimization, according to my personal judgment, cannot be the solution because it will only scale poorly.\n- For the experiments, the spatial dimension is 1 or 2, and small in range. It would be interesting to see some results for problems huge in space." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "What is the n-width of the problems considered (as given by the spectral decay of the snapshot matrix)?" 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The presentation is clear and the literature review is thorough and provides a good introduction.\n- The method shows strong performance on the chosen benchmarks" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors present a method of solving PDEs by parameterizing solutions fields with neural networks whose parameters depend on time. The integration scheme solves for the last layer cofficents. The basis functions, induced by the inner parameters, are generated via a data driven or data agnostic way." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The motivation for the method is not totally clear. Introducing neural networks to solve PDEs where the parameterization evolves nonlinearly in time is motivated by breaking the kolmogorov n-width as in [1,2,3,4]. In this work the parameterization still evolves linearly in time. The neural network is only used to choose a basis. So it is unclear why one would pick this method over a traditional solver, which are extremely well understood in terms of convergence properties and accuracy. It seems to me the only reason would be to deal with complicated geometries? If so currently the paper does not devote enough attention to arguing and demonstrating this advantage. Additionally for these reason the comparison with PINNs is ill-chosen. The most appropriate comparison would be to traditional methods which also evolve linearly in time. While comparison is made to a finite-element method, this is not the best choice for some of the problems present. For the data-agnostic a reasonable spectral method should also be compared to and for the data-dependent method POD should be compared to.\n\nIt would be helpful to:\n\n- make more explicit the advantages over traditional, show this advantages clearly in the experiments.\n- add a comparison to a spectral methods for the data-agnostic case.\n- add a comparison to POD for the data-dependent case.\n\n\n[1] Evolutional Deep Neural Networks. Du et al.\n[2] Randomized sparse neural galerkin schemes for solving evoluation equations with deep networks. Berman et al.\n[3] Positional embeddings for solving PDEs with evolutional deep neural networks. Kast et al.\n[4] Breaking the Kolmogorov Barrier with Nonlinear Model Reduction. B Peherstorfer." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Could the authors clarify the absence of experiments involving higher-dimensional PDEs? 
Given the introduction’s emphasis on the limitations of mesh-based methods—particularly their impracticality in complex domains and high-dimensional spaces—it would be valuable to see examples where the proposed method effectively addresses these challenges. Higher-dimensional cases are particularly relevant to machine learning applications, where scalability in complex domains is critical." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The authors propose a backpropagation-free method that leverages random sampling techniques like Extreme Learning Machines (ELM) and Sampling Where It Matters (SWIM) to address the inefficiencies of traditional backpropagation, especially for complex time-dependent PDEs.\n\n2. The paper reports significant speed gains in training time, with improvements of up to 5 orders of magnitude over standard PINN approaches.\n\n3. Specialized handling of boundary conditions and separation of variables for time-dependent PDEs are some of the contributions that could impact future neural PDE solvers.\n\n4. The authors demonstrate extensive benchmarking across a range of PDEs with different challenges, showing superior performance in terms of speed and accuracy.\n\n5. The paper is well-written and easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a method for training neural PDE solvers without backpropagation, which aims to improve efficiency in solving time-dependent partial differential equations (PDEs). The authors integrate two main ideas: separating space and time variables and randomly sampling weights and biases in hidden layers. By reformulating the PDE as an ordinary differential equation (ODE) using neural networks for spatial components, they leverage traditional ODE solvers for time evolution. The approach is benchmarked against standard backpropagation-based Physics-Informed Neural Networks (PINNs). It shows improvements in accuracy and speed on complex PDEs involving non-linearities, high-frequency temporal dynamics, and shocks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The authors have mentioned the limitations of their method and share possible directions to follow in future work." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Experiments.\n- The boundary conditions are approximated using a boundary-compliant layer. For instance, in the case of periodic BC, the authors approximate $\\sin(kx)$ and $\\cos(kx)$ by applying a linear transformation to the basis function. However, this raises the question: what advantage does the proposed method offer compared to just using $\\sin(kx)$ and $\\cos(kx)$ as basis functions, or P1, P2 basis functions in FEM?
A numerical comparison in this scenario would be helpful.\n- It appears that $C(t)$ is calculated by multiplying the pseudo inverse of feature matrix $[\\Phi(X),1]$, where $X$ contains all the collocation points. In cases of high dimensionality $d>>1$ where $N>>1$ to cover the entire domain, there may be significant computational demands. Further discussion and experiments on the computational cost in high-dimensional settings would be needed.\n\n2. Theoretical contributions\n- Does ELM possess a universal approximation property? If so, can this be generalized to the neural PDE solver setting?\n\n3. Limited applications\n- As the authors mention, the method cannot be applied to grey-box or inverse problem settings. Given this, what advantage does the mesh-free nature provide?\n- If the pseudo-inverse calculation for $[\\Phi(X),1]$ becomes computationally expensive, especially in high-dimensional problems, what practical benefit does mesh-free implementation offer?\n- Overall, what advantages does the proposed method offer over mesh-based approaches? In many cases presented in the paper, mesh-based methods achieve superior test accuracy with shorter training(computing) times." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Significantly lower relative error compared to PINNs\n- Substantially faster training speed than PINNs\n- Achieves both improvements without backpropagation while retaining a mesh-free approach" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a backpropagation-free training algorithm for a neural partial differential equation solver, utilizing the Extreme Learning Machine (ELM) framework. The method reformulates the partial differential equation (PDE) as an ordinary differential equation (ODE) problem through the separation of variables, which is then solved using classical ODE solvers. Numerical experiments show that the proposed method outperforms traditional PINNs in both test accuracy and training speed." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The experiments are insufficient to fully support the authors' claims.\n- The paper lacks theoretical contributions.\n- The proposed method has a limited range of applications, which restricts its overall contribution." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Is the proposed method able to handle PDEs with higher-order time derivatives?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The proposed method is novel and provides a distinct method to solve time-dependent PDEs other than classical numerical methods and PINNs.\n- The experiment results show that te proposed method outperforms PINNs by orders of magnitude of accuracy; the accuracy is even comparable to classical numerical solvers.\n- The authors also provide techniques to satisfy boundary conditions and improve the condition number of the associated ODE." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes to use a hybrid framework consisting of a neural network ansatz and a classical ODE solver to solve typical time-dependent PDEs. Specifically, the neural network ansatz features separation of spatial and temporal variables, and the parameters of this network is randomly sampled rather than trained with back propagation. Numerical experiments are conducted to verify the high accuracy and reduced training time of the proposed method." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper should consider add more backgrounds about the random-sampling methods of neural network weights. Without back-propagation, how does this random-sampling of weights influence the final solution of the proposed method? As can be seen in Table 2, the standard deviations of your proposed method is relatively larger than PINNs, although the accuracy is significantly better. \n- The paper should add some ablation studies to provide more insight about each component of the proposed method. For example, the necessity of the SVD layer, the influence of number of hidden neurons.\n- It would add more practicabillity of the proposed method by providing more detailed comparisons between ELM-ODE and SWIM-ODE. Is one strategy better than another, or one should choose between these two strategies based on the PDE to tackle?" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a backpropagation-free algorithm to train neural PDE solvers for time-dependent problems." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024backpropagationfree,\ntitle={Backpropagation-free training of neural {PDE} solvers for time-dependent problems},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4KKqHIb4iG},\nnote={under review}\n}" }, "abstract": { "value": "Approximating solutions to time-dependent Partial Differential Equations (PDEs) is one of the most important problems in computational science. Neural PDE solvers have shown promise recently because they are mesh-free and easy to implement. However, backpropagation-based training often leads to poor approximation accuracy and long training time. In particular, capturing high-frequency temporal dynamics and solving over long time spans pose significant challenges. To address these, we present an approach to training neural PDE solvers without back-propagation by integrating two key ideas: separation of space and time variables and random sampling of weights and biases of the hidden layers. 
We reformulate the PDE as an ordinary differential equation using a neural network ansatz, construct neural basis functions only in the spatial domain, and solve the ODE leveraging classical ODE solvers from scientific computing. We demonstrate that our backpropagation-free algorithm outperforms the iterative, gradient-based optimization of physics-informed neural networks with respect to training time and accuracy, often by several orders of magnitude using different complicated PDEs characterized by high-frequency temporal dynamics, long time span, complex spatial domain, non-linearities, and shocks." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "neural PDE solvers", "time-dependent partial differential equations", "random feature networks", "backpropagation-free training" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/00d58aa9d9ea0d94648b58effd93cbe51a0c2d31.pdf" }, "presentation": null, "primary_area": { "value": "learning on time series and dynamical systems" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/acf7faa5e4b7bdb1629658074d00b0b05b59ef3f.zip" }, "title": { "value": "Backpropagation-free training of neural PDE solvers for time-dependent problems" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
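The abstract and reviews above describe the core recipe: a random, untrained spatial feature map supplies the basis functions, and only the last-layer coefficients evolve in time via a classical ODE solver. The sketch below illustrates that recipe on a 1D heat equation with tanh random features and a pseudo-inverse collocation projection; the feature distribution, the choice of equation, and the omission of the paper's boundary-compliant and SVD layers are all simplifying assumptions, so this is not the ELM/SWIM implementation evaluated in the submission.

```python
# Sketch of the backprop-free idea: u(x, t) ~ Phi(x) c(t) with random tanh features,
# where only c(t) is evolved by an ODE solver (simplified; boundary handling omitted).
import numpy as np
from scipy.integrate import solve_ivp

rng = np.random.default_rng(0)
nu, n_feat, n_col = 0.1, 200, 400
x = np.linspace(0.0, 2 * np.pi, n_col)

W = rng.uniform(-2.0, 2.0, n_feat)            # random (untrained) hidden weights
b = rng.uniform(-np.pi, np.pi, n_feat)
S = np.outer(x, W) + b                        # (n_col, n_feat) pre-activations
Phi = np.tanh(S)                              # spatial basis functions
Phi_xx = (W**2) * (-2.0 * np.tanh(S) * (1.0 - np.tanh(S) ** 2))  # analytic d2/dx2

Phi_pinv = np.linalg.pinv(Phi)                # least-squares collocation projection

def rhs(t, c):
    # heat equation u_t = nu * u_xx projected onto the random-feature basis
    return Phi_pinv @ (nu * (Phi_xx @ c))

c0 = Phi_pinv @ np.sin(x)                     # fit the initial condition u0 = sin(x)
sol = solve_ivp(rhs, (0.0, 1.0), c0, method="RK45", rtol=1e-6, atol=1e-8)

u_T = Phi @ sol.y[:, -1]
# rough sanity check against the exact decaying solution exp(-nu * t) * sin(x)
print("max abs deviation:", np.abs(u_T - np.exp(-nu) * np.sin(x)).max())
```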
4Kw4KAoVnx
Sparse MeZO: Less Parameters for Better Performance in Zeroth-Order LLM Fine-Tuning
main
Active
Zeroth-Order Optimization
optimization
5;5;5;5
4;4;3;3
2;2;3;3
2;2;2;2
4;3;2;4
5
3.5
2.5
2
3.25
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. Could the authors clarify the computational cost associated with generating the dynamic mask at each iteration and how it compares to MeZO in practice? such as a wall-clock time wise comparison similar to Figure 1\n\n2. How sensitive is Sparse-MeZO’s performance to the choice of layer-wise sparsity threshold, and what considerations guided the threshold selection?\n\n3. Can you provide detailed threshold hyperparameters for reproducibility?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. Incorporating sparsity and MeZO is an interesting direction for performance improvement. \n\n2. The paper offers good empirical validation, showing clear improvements over baseline methods across a range of fine-tuning tasks.\n\n3. The paper is structured logically, making it easy to follow the motivation and methodology" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes Sparse-MeZO, a memory-efficient zeroth-order optimization (ZO) technique for fine-tuning LLM by selectively optimizing a subset of parameters, rather than all. Based on MeZO, Sparse-MeZO introduces a sparse mask that targets smaller, noise-resilient weights, thereby mitigating gradient estimation noise, improving convergence speed, and reducing memory usage. Key results demonstrate Sparse-MeZO’s efficiency, achieving faster convergence and improved performance over MeZO, with the ability to fine-tune models like LLaMA-30b on limited hardware resources (e.g., a single A100 GPU)." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper lacks details on how thresholds for selecting small-magnitude weights are determined, how they vary across tasks, and whether they require re-tuning for different settings.\n\n2. It’s not clear if masking and selecting small-magnitude weights specifically benefit zeroth-order optimization more than generic fine-tuning (first-order methods). Since subset selection often improves generalization, it would be needed to evaluate this effect.\n\n3. There is no specific numbers regarding the computational overhead introduced by dynamic masking and threshold calculation.\n\n4. The motivation for why zeroth-order optimization particularly benefits from small-magnitude weights lacks enough theoretical or empirical support. An ablation study showing this effect more clearly would strengthen the argument." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- Mask Updates per Iteration: Are the masks updated in each iteration? The concept of dynamic masking appears similar to Dynamic Sparse Training (DST), where the frequency of mask updates is a critical factor affecting performance [1,2]\n\n- Can Sparse MeZO be integrated with LoRA, similar to how MeZO has been combined with LoRA in previous work?\n\n- When claiming the 3.5× speed-up in Figure 1, are both methods using the same learning rate, or does the speed-up depend on differing learning rates?\n\n- Determining the masks is a key component of this work, and the authors use a simple magnitude-based method. Do you believe that employing more advanced methods like Wanda [3] or SparseGPT [4] could further enhance performance?\n\n[1] Do we actually need dense over-parameterization? in-time over-parameterization in sparse training, ICML 2021\n\n[2] Rigging the Lottery: Making All Tickets Winners, ICML 2020\n\n[3] A Simple and Effective Pruning Approach for Large Language Models, ICLR2024\n\n[4] SparseGPT: Massive Language Models Can Be Accurately Pruned in One-Shot" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Interesting Idea: Proposes Sparse MeZO, selectively applying zeroth-order optimization to improve memory efficiency.- \n- Good Performance: Achieves significant gains in accuracy and convergence speed over existing methods.\n- Simple but Efficient Algorithm: Offers a straightforward yet effective approach for fine-tuning large models on limited hardware." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces Sparse MeZO, a memory-efficient optimization method for fine-tuning large language models. By selectively applying zeroth-order updates to a carefully chosen subset of parameters, it reduces estimation errors inherent in zeroth-order methods. This approach improves both performance and convergence speed without increasing memory usage, enabling efficient fine-tuning of large models like LLaMA-30b on limited hardware resources." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Some details need to be clarify, as explained in Questions" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "* Given a fixed training budget does SparseMeZO outperform fine-tuning with adapters (+ gradient accumulation)?\n\n* I think it would be insightful to add a comparison between the gradient updates with standard SGD optimization and SparseMeZO on a smaller scale model (say of size ~1B parameters)." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "* The fact that instability is caused by the noisy optimization of weights with large magnitude appears to be a useful practical insight. \n\n* SparseMeZO consistently improves upon dense MeZO optimization in different setups. \n\n* The proposed memory-efficient implementation of S-MeZO incurs almost zero memory overheads relative to original MeZO and allows tuning large models on a single GPU." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work proposes zeroth order memory-efficient optimization with sparse updates of model parameters for fine-tuning. Sparse updates are shown to facilitate better convergence than vanilla MeZO. The introduced method is evaluated on several tasks from SuperGLUE and several model architectures - Llama-1, Mistral, OPT." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* The overall contribution is incremental as the work adds additional sparsification steps on top of the MeZO optimization algorithm. \n\n* Evaluation setup is outdated (as for Fall 2024). Llama-1 and Mistral-v0.1 were released more than a year ago, and current models (Llama-3, Llama-3.1, Qwen-2.5, gemma-2) have advanced dramatically in terms of quality since then. In addition, I would suggest testing the approach on a more challenging and representative fine-tuning setup, such as instruction tuning on Alpaca/OASST or any other instruction fine-tuning dataset to fully appreciate the efficacy of SparseMeZO. \n\n* Some important baselines are not taken into consideration. PEFT adapters (LoRA, DoRA) allow for significant memory reduction on optimizer states. Memory footprint on activations and gradients is not very large for relatively short sequences (which is the case for SuperGLUE tasks) and small batches. In addition, it can be further decreased by gradient accumulation at the cost of additional compute. I would suggest adding comparison with these options and reporting memory usage in Table 3." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "See above." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper is well motivated, the approach is novel and the results are convincing. I particularly enjoyed the writing style and clear structure of the paper, it was mostly easy to follow, enjoyable to read and the reader was not left with many questions. The proposed method is interesting, however there are open questions, which I will discuss below. Overall, I think this is a good paper that however needs to resolve several questions before being suited for publication." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper builds on the Memory-efficient Zeroth-order optimizer (MeZO) and introduces a new method, called Sparse MeZO. While MeZO is successful in training a neural network only with forward passes and by estimating the batch gradient using a finite difference scheme, there is still a significant gap between MeZO and common first-order methods. The authors aim to narrow this gap by updating only a sparse subset of the model parameters in each iteration. In particular, they propose to only update the smallest weights in each layer. They provide efficient implementations of doing so and show that Sparse MeZO performs better than multiple baselines, including MeZO." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While I appreciate research in this important field, I have several concerns regarding soundness, clarity and contribution of the work, which I explain in detail below. I hope that these remarks are helpful in improving the work and I am happy to discuss my evaluation.\n\n### Soundness\n- In Figure 2b, you compare the potential loss increase on a different batch than the one used by MeZO, you then conclude that \"zeroth-order gradient estimates suffer from overfitting or noise\" (Line 189). I think to really conclude this, you would need to compare the MeZO gradient on a batch with the actual gradient on the same batch. The very same statement could be true for e.g. SGD, especially if the batch size is small and the variance of the gradient is high. I would greatly appreciate to see this figure instead or in addition to Figure 2b (potentially in appendix).\n- In Line 197, you write that \"all parameters share the same value\". What is meant by this? The formula you say that \"is used to estimate the gradient\" only gives the magnitude of the multiplier, the actual gradient vector is given by $z$, where definitely not all elements are the same. Apart from that being a false premise, you then conclude that not all parameters are optimized in the true gradient direction, which I found a surprising statement. As far as I understand it, MeZO or variants randomly sample a direction along which the gradient is estimated, constrained to that particular direction. It will with very high probability not be anywhere close to the true gradient direction, by virtue of the fact that you are random sampling the directional derivative. Please clarify this, maybe I am mistaken here. \n- In the same paragraph, you somehow derive that \"small weights are less impacted by noise corruption and can generalize better\" - it is not clear to my why you derive this conclusion, is it just by the fact that you empirically find this to work better? 
What is the mathematical intuition or even justification for this statement? I think to resolve this soundness issue, you have to answer two very important questions: First of all, why does it make sense to use fewer weights in the first place? For first-order optimization this would be absurd; what is the difference here? Why should this work better than using all weights? And secondly, why would exactly the smallest weights have the highest impact? I could potentially follow this argument if you were actually sparsifying the computed gradient; then one could argue via a Taylor approximation or the like. But you are not doing this; you are instead sparsifying the weights. Why?\n- Figure 4 highlights the effect of sparsity on the final performance. It is not clear why this curve looks like this and why that would make sense. The authors should clarify why they think that sparsity in the weights during gradient estimation helps (apart from providing empirical evidence that it performs better). That sparsity 70% performs badly, sparsity 75% performs pretty well, and sparsity 80% performs worse again seems very arbitrary and not intuitive. In the worst case, this is just a noise artifact.\n\n\n### Clarity and Experimental Validation\n- Line 123: What is \"SPSA\"? Is this defined anywhere in the paper?\n- In Line 260, you explain how you determine the mask. Why don't you set the threshold dynamically in each iteration? It's efficient and seems like the obvious thing to do.\n- In Table 1, you report values for MeZO-LoRA. While I might have a vague idea of what that could be, this seems to be neither explained nor defined. Is this known from the literature or was this just missed?\n- Are the numbers in the tables reported with respect to the same overall runtime or number of iterations? If MeZO variants are faster than e.g. FT, it would be nice to see how they compare on the same runtime.\n- In the appendix (e.g. F), you refer to Lemma 3.2. What exactly is this, and where is it defined? The same holds for the following appendix sections; I think these refer to theorems/lemmata that do not exist or are not defined with the same numbering style. Please clarify.\n\n### Contribution\n- As outlined above, I think the contribution of the work is a major issue here. I fully acknowledge that Sparse MeZO achieves better results and is in that sense a meaningful contribution. However, the derivation of the method seems to be at least somewhat vague; it lacks justification apart from achieving better results. There is not much insight to gain since the paper lacks mathematical justifications, or at least intuitions. The derivation from gradient noise seems not very rigorous, at least to me. I hope that the authors can convince me otherwise.\n\n\n### Minor Remarks\n- Section 4.1 uses \\citet everywhere where I think \\citep is intended; this hinders readability.\n- Line 483: There is a typo; I guess it should be \"on a single A100 GPU\".
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024sparse,\ntitle={Sparse Me{ZO}: Less Parameters for Better Performance in Zeroth-Order {LLM} Fine-Tuning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4Kw4KAoVnx},\nnote={under review}\n}" }, "abstract": { "value": "While fine-tuning large language models (LLMs) for specific tasks often yields impressive results, it comes at the cost of memory inefficiency due to back-propagation in gradient-based training. Memory-efficient Zeroth-order (MeZO) optimizers, recently proposed to address this issue, only require forward passes during training, making them more memory-friendly. However, compared with exact gradients, ZO-based gradients usually exhibit an estimation error, which can significantly hurt the optimization process, leading to slower convergence and suboptimal solutions. In addition, we find that the estimation error will hurt more when adding to large weights instead of small weights. Based on this observation, this paper introduces Sparse MeZO, a novel memory-efficient zeroth-order optimization approach that applies ZO only to a carefully chosen subset of parameters. We propose a simple yet effective parameter selection scheme that yields significant performance gains with Sparse-MeZO. Additionally, we develop a memory-optimized implementation for sparse masking, ensuring the algorithm requires only inference-level memory consumption, allowing Sparse-MeZO to fine-tune LLaMA-30b on a single A100 GPU. Experimental results illustrate that Sparse-MeZO consistently improves both performance and convergence speed over MeZO without any overhead. For example, it achieves a 9% absolute accuracy improvement and 3.5x speedup over MeZO." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Zeroth-Order Optimization" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/6ec020f892bf140449f916b445c31289cd76ef71.pdf" }, "presentation": null, "primary_area": { "value": "optimization" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Sparse MeZO: Less Parameters for Better Performance in Zeroth-Order LLM Fine-Tuning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4LiegvCeQD
IEL: Intra-Model Ensemble Learning For Single Sample Test-Time Adaptation
main
Active
Test-Time Adaptation;Ensemble Learning;Entropy-Regularization;Knowledge Distillation
transfer learning, meta learning, and lifelong learning
1;3;3;3
4;3;4;5
1;1;2;1
2;2;2;3
2;2;2;2
2.5
4
1.25
2.25
2
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "There is no potential violation of the CoE." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please refer to the weaknesses part." }, "rating": { "value": 1 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "- The paper addresses a challenging TTA settings where only a single, unlabeled sample is given for adaptation during test time.\n- The paper adopts an interesting approach of ensemble learning to dynamically optimize a group of learners (pre-trained models), showing improved TTA performance." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces an intra-model ensemble learning method for single sample test-time adaption. It minimizes the cross-entropy losses between the output with the highest confidence score and all other classifier's outputs. It optimizes all trainable parameters (except for BN layers) and requires only a single sample for TTA. It achieves improved performance on corrupted datasets including CIFAR10-C, CIFAR100-C and ImageNet-C." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The proposed algorithm offers no substantial improvement over existing ensemble learning methods. It simply combines 1) selecting the most confident prediction and 2) cross-entropy minimization of ensemble models. Technical contributions to both ensemble learning and single-sample TTA remain limited.\n- The paper lacks sufficient experimentation to demonstrate the proposed method’s effectiveness. It only compares results across different backbone architectures, without considering other baseline methods suitable for single-sample TTA, such as NOTE [1] and REALM [2]. Additionally, it does not explore alternative TTA settings, such as continual TTA, where incoming domains continuously change.\n- The experiment section (Sec. 4) requires more careful writing and clarification. For instance, it should include a clear definition and detailed description of the tuning set samples, as well as more comprehensive experiments, including ablation studies, to examine the correlation between the number of ensemble models and TTA performance.\n- In Section 4.1, the authors state that no catastrophic forgetting was observed on the ImageNet-C dataset. However, this is unlikely to be accurate since only 7,000 samples per corruption type from ImageNet-C were used for evaluation. More rigorous experiments and substantiated claims are needed.\n- As noted in the limitations, the proposed method requires significant computational resources to utilize multiple pre-trained networks. However, the paper does not provide any empirical analysis or comparison of computational cost or adaptation latency.\n\n[1] Gong et al., Note: Robust continual test-time adaptation against temporal correlation. 
NeurIPS'22. \\\n[2] Seto et al., REALM: Robust Entropy Adaptive Loss Minimization for Improved Single-Sample Test-Time Adaptation. WACV'24." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please see the weaknesses above that are directly actionable. No further Qs." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The technique is simple and elegant. It generalises the commonly used concept of a student/teacher setup, which typically uses a single model, and is straightforward to scale in practice (by increasing the size of the ensemble)." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose a test-time adaptation (TTA) technique based on model ensembling. A set of models is simultaneously trained, and for each sample, the model with the highest confidence is used as a teacher for all student models. Updates happen via standard cross-entropy. The authors show improvements over a non-adapted baseline model across CIFAR10/100-C and ImageNet-C." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "**Summary**\n\nThe presentation and investigation done in the paper are well below the bar for ICLR. There are no baselines, and the results are not well presented and contextualised. The introduction to the paper is lengthy and should be made more crisp. The contribution statement is not accurate and needs to be adapted to what is actually shown in the paper. A lot of important controls and analysis experiments are missing to back up the claims. While I think that the general idea has potential, it needs a much better investigation to be considered for acceptance. A list of actionable points is given below. \n\nFor full transparency, while I would be happy to increase the score if my points are addressed, I doubt this is doable within the rebuttal period. Depending on the other reviews, I would already suggest that the authors consider a full revision of the paper and submission of their paper to the next conference. That being said, I think this could become a very nice paper if more work is spent on the experiments, and I would be happy to iterate with the authors during the discussion period.\n\n**Major**\n\n**W1** There is a very lengthy introduction. The method section only starts on page 5. Before, a fair amount of related work is cited (great), but without considering any of that later as methods for comparisons.\n\n**W2** A naive baseline is omitted: What happens if a state-of-the-art adaptation technique like EATA, or even older/simpler techniques like TENT are used, but adapting $n$ models in parallel, and then ensembling the outputs?\n\n**W3** Section 3 needs a rewrite and improvements to clarity. 
For example, basic metrics like the cross-entropy loss are abbreviated with an extra symbol $\\delta$, it would be better to clearly define the loss formulation used instead.\n\n**W4** The contributions reference “continual learning” (l. 130, l. 134), but there is no experiment backing this up. The reference should be removed.\n\n**W5** Claim 3 in the contributions (ll. 135-136) states that TTA requires a full batch. This is misleading or even wrong. There are certainly techniques that measure improvements when only a single sample is available (= model is adapted on that sample, and performance is measured) before the domain changes (e.g. Batch norm adaptation, or MEMO). However, in the setting considered here, model updates still accumulate on the same domain. In that setting, any TTA technique, like TENT, can be made suitable for the discussed setting by only updating every N steps (where N is the batch size), collecting the samples in the meantime.\n\n**W6** Claim 4 in the contributions is not at all corroborated by results in the paper and should be dropped.\n\n**W7** The paper needs to add recent methods to compare to. The current tables provide a lot of irrelevant details, and should be compressed. Instead of listing all different domains, it would be better to run a sufficient number of baseline models on the considered datasets to contrast the results. When doing so, it could be interesting to investigate whether the ensembling mechanism proposed is *orthogonal* to other adaptation methods: E.g., if we consider the EATA loss function for ensembling, does this improve over EATA?\n\n**W8** Analysis should be added: What happens if the number of models in the ensemble varies? How robust is this technique to common problem in cross-entropy based adaptation (model collapse) when training over long time intervals?\n\n**W9** In case the authors decide to keep the continual learning claim: How does the model perform in the continual adaptation settings used in CoTTA or EATA, and how does the model perform on long-term adaptation on CCC (Press et al., 2023)?\n\n**Minor**\n\n- Related work in l. 178: Batch norm adaptation (Schneider et al., 2020) re-estimates batch norm statistics, and this was shown to also work on single samples. Test time training (Sun et al., 2019) also considers single samples. TENT, in contrast, *additionally* performs entropy minimisation akin to the cross-entropy loss discussed in the study.\n- Figure 1 misses a legend. The color code could be adapted, and e.g. corruptions of the same type could get the same color with different marker colours. That would make the plot more interpretable than the current 15 random colours." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "No ethics review needed." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "* What are the key technical differences of the proposed method to simply applying gradient-based self-training to an ensemble classifier, one sample at a time? 
\n\n* What are the key technical differences to prior work on ensemble self-training, such as [Ghosh, 2021]?\n\n* Please clarify these claims: 1) page 3: \"we minimize the diversity of the ensemble [...] in a way that facilitates generalization of the source training domain to the new testing domain.\" What does \"facilitates generalization\" mean here? Just higher test accuracy? In what way is lower *diversity* of the ensemble an important factor for that? 2) In the conclusion: \"member models are optimized and improved like in knowledge distillation and create an upward spiral effect\". What do you mean by upward spiral effect? E.g. from Figure 3 it appears that model accuracy goes up and down over epochs. \n\n* How were the method's hyperparameters chosen, given that standard model selection is not possible in the test-time adaptation setting?\n\n* Which variant/subset of ImageNet was used? Is the \"test\" data the actual \"test\" set or the \"validation\" part?\n\n* The manuscript emphasizes a batch size of 1, but multiple *epochs* are run on the adaptation data. Does this mean that your method must buffer all test data, or at least see it repeatedly, such that statistics can be collected? Wouldn't batch-based TTA techniques also be applicable then?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "Strengths:\n* the studied problem is realistic and relevant\n* the proposed method makes sense and is simple enough for practical use\n* experiments show improved accuracy by the proposed test-time-adaptation" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The submission describes a method for adapting an ensemble of classifiers to a new data distribution using only unlabeled data. Specifically, the classifiers are trained sequentially on soft pseudo-labels, which are the output of the model that has the highest predicted probability for the majority class across classifiers. Experiments are performed on standard datasets (CIFAR10, CIFAR100, ImageNet) with synthetic distribution shifts for an ensemble consisting of 5 pre-trained image classification models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Unfortunately, the submission has a number of weaknesses. \n\n* incomplete method\n\nThe method works iteratively but the submission does not provide a termination condition. This is not as minor an issue as the manuscript makes it sound: without a termination condition, the method cannot be run in practice, and because no labeled data is available at test time, standard techniques, such as checking for a drop of validation accuracy, cannot be applied, either. \n\n* lack of clarity in contribution and scientific comparison to prior work\n\nThe submission almost exclusively showcases the proposed method itself, but it does not put the method into context and competition with prior or alternative techniques. This leaves the novelty and relevance of the contribution unclear. Specifically, for a publication at a scientific venue, I expect that the novel aspect of the proposed method is clearly described, contrasting it to ideas and techniques that already existed in previous works. The manuscript does not do a good enough job doing so. 
As related work it concentrates almost exclusively on recent papers from the application domain of test-time adaptation with deep networks. However, ensemble methods have been studied extensively in machine learning, e.g. [Alpaydin, \"Introduction to Machine Learning\", 2014; Chapter 17], and self-training methods for learning from unlabeled data also have a long history, going back at least to [Fralick, \"Learning to Recognize Patterns without a Teacher\", 1965], and having emerged many times afterwards, e.g. in the context of semi-supervised learning (e.g. Chapelle \"Semi-Supervised Learning\", 2006) and also test-time adaptation, e.g. [Royer et al, \"Classifier adaptation at prediction time\", 2015]. Even for adapting ensemble methods, self-training was proposed before, e.g. [Ghosh et al, \"Self Training with Ensemble of Teacher Models\", 2021]. In light of extensive prior work, the manuscript should make clear what the technical novelty of the proposed method is. \n\n* lack of baselines in experiments \n\nThe experimental evaluation presents results for the proposed method, but it does not contrast them against prior works. Consequently, the reader only learns that using the proposed method often (but not always) works better than not doing test-time adaptation, but not whether the method is better than already existing methods for test-time adaptation. It would also be important to see at least a comparison to obvious baselines, such as simply self-training each network individually. For the specific choices made, e.g. using the majority vote prediction as target label but the softmax scores of the most confident classifier, an ablation study would be needed to show whether this is actually useful, or if maybe using hard labels or the ensemble average confidence would work equally well (or better).\n\n* unsubstantiated claims\n\nThe manuscript contains factual claims, e.g. about design choices that are not self-evident but also not substantiated with evidence. Some of these appear misleading and/or are not credible; see \"Questions\". The counterexample on page 4 seems to be a misunderstanding of prior work. Claims in the literature are not that diversity is *necessary* for a good ensemble. Indeed, as the submission states, an ensemble consisting of many copies of a perfect classifier is still perfect. But rather, diversity of prediction *mistakes* between models in an ensemble can provably benefit accuracy. Without any errors, that notion is not defined. But if mistakes do occur, the variance of predictions is decreased if errors are negatively correlated (e.g. (17.5) in [Alpaydin, 2014]). \n\n* shortcomings in the experimental protocol \n\nSeveral aspects of the experimental evaluation are unclear or misleading (see questions below). \n- The reported accuracy values in Tables 1-3 are \"highest accuracy improvements\" over all epochs.\nThat means they are not unbiased estimates, but potentially overconfident. \n- The description of ensemble elements is not clear from the text; it currently seems only provided in the Table headers. \n- The specified regularization constant $\\alpha=10e^{-11}$ (which should probably be $\\alpha=10^{-11}$) is so small that no regularizing effect is mathematically possible even after hundreds of thousands of steps. I would expect $\\alpha=0$ to work equally well. \n- The exact variant of the \"ImageNet\" dataset used is not clear. Best provide a source for how the data was obtained and prepared. 
\n- The results table lists differences in accuracy, but not absolute accuracy numbers, which would be useful to judge e.g. if the base classifiers are suitable for the task. \n- It is not specified how the method's hyper-parameters were selected. \n- Given the mixed positive and negative results, a test of significance should be performed if the reported results are not equally well explained by random chance (e.g. Wilcoxon signed rank). \n\nFurther comments:\n- I found the analogies with human learning or research (for example the top of page 3) rather superficial, and I would recommend removing those. \n- The reference section should be corrected. Many papers are listed as arXiv or without a publication venue, even though they have actually been published, e.g. [Arazo et al, IJCNN 2020], [Bucila et al, KDD 2006], ..." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Is there any theoretical or empirical evidence that minimizing the diversity of the ensemble is beneficial for TTA? \n\n2. Figure 2 shows that predictions with lower IEL loss have lower entropy. However, a previous study [3], also cited in the paper (Lines 101–105, 173–176), claimed that reducing the entropy of the output during training can be problematic for TTA. This raises doubts about whether lower IEL loss actually leads to higher TTA performance, and I am curious whether there is any evidence to verify the relationship between IEL and TTA performance. \n\n3. Are there any experimental results that address the weaknesses mentioned (e.g., comparisons with previous studies, an ablation study on the number of models, computational cost comparisons, and performance comparisons across various TTA scenarios)? \n\n4. If there are multiple source-trained models, what advantages does IEL offer over an approach that applies conventional TTA methods to each model individually and then performs an ensemble?\n\n[3] Entropy is not enough for test-time adaptation: From the perspective of disentangled factors" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "1. This paper is well written and easy to follow along with their reasoning.\n2. The idea of utilizing ensemble methods for TTA is simple, intuitive and easy to adapt.\n3. Experiments were conducted on a single-sample setting, which is one of the challenging tasks in TTA." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work introduces a new method for Test-Time Adaptation (TTA), Intra-model Ensemble Learning (IEL), that optimizes multiple models for ensembled inference. 
In the IEL framework, the output of the model with the highest probability for the class that received the most votes is set as the pseudo-label, and all other models are optimized to output this pseudo-label by minimizing the cross-entropy. This process minimizes the diversity within the ensemble and aligns the outputs of the models to ensure mutual agreement. Experimental results show the effectiveness of the proposed framework." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. There is insufficient justification for minimizing the ensemble diversity. Personally, I believe that as diversity increases, performance can be improved in general, including for TTA, since the information also increases. For example, in [1], which is also referenced in this manuscript, it was shown that increasing diversity can lead to a lower loss. Additionally, the counterexample of an ensemble of models with 100% performance (Lines 161–166) is unrealistic and therefore inappropriate for supporting the claim. If there is a large distribution shift and the source-trained models perform poorly on the target dataset, reducing diversity may actually have an adverse effect. In conclusion, it remains unclear how reducing ensemble diversity benefits TTA.\n\n2. Due to multiple assumptions, the scope of application of this research is limited. The authors assume there are multiple source-trained models (Lines 230–232), but it is questionable whether this assumption is easily met in practice. Furthermore, the assumption of stationary distribution shifts (Line 265-266) raises concerns about whether IEL would be effective in other TTA scenarios such as online imbalanced label distribution shifts or mixed distribution shifts [2].\n\n3. The experiments conducted do not sufficiently demonstrate the effectiveness of IEL. For example, the authors should include 1) performance comparisons with various previous works on TTA, 2) an ablation study on the number of ensemble models, and 3) comparisons of computational costs associated with using multiple models. As mentioned earlier, including experiments across diverse TTA scenarios would also provide a more comprehensive understanding of IEL’s effectiveness.\n\n[1] A unified theory of diversity in ensemble learning\n[2] Towards stable test-time adaptation in dynamic wild world" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Ensemble and Entropy-Optimization based algorithm for adapting sets of pre-trained models to distribution shifted data." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024iel,\ntitle={{IEL}: Intra-Model Ensemble Learning For Single Sample Test-Time Adaptation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4LiegvCeQD},\nnote={under review}\n}" }, "abstract": { "value": "Test-Time Adaptation (TTA) problems involve adapting pre-trained models to new data distributions in testing time, with access to only model weights and a stream of unlabeled data. In this work, we present IEL, a method for adapting sets of independently pre-trained classifiers to distribution shifted data one sample at a time without labels. We minimize the cross-entropy between the classifier output that has the highest predicted probability for the majority voted class (a high confidence softmax) and all other models in a set of classifiers. 
The majority voted model that all others learn from may change from sample to sample, allowing the group to collectively learn from each other. Our method uniquely optimizes all trainable parameters in each model and needs only a single sample for adaptation. Using sets of independently pre-trained base classifiers with distinct architectures, we show that our approach can reduce generalization error for image classification tasks on corrupted CIFAR-10, CIFAR-100, and ImageNet while also minimizing the entropy of model outputs." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Test-Time Adaptation", "Ensemble Learning", "Entropy-Regularization", "Knowledge Distillation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/fa64481045e339fea8fd39930e5a287ab15dc720.pdf" }, "presentation": null, "primary_area": { "value": "transfer learning, meta learning, and lifelong learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "IEL: Intra-Model Ensemble Learning For Single Sample Test-Time Adaptation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
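Reading the IEL abstract and the review summaries together, one single-sample adaptation step can be sketched as below. The toy MLPs, the learning rate, and the choice to leave the teacher model untouched are assumptions for illustration, not the authors' code (which, per the review summaries, reportedly updates all trainable parameters except BN layers of real image backbones).

```python
import torch
import torch.nn.functional as F

torch.manual_seed(0)

# Toy stand-ins for five independently pre-trained classifiers with distinct
# architectures (an assumption; the paper uses image backbones on corrupted CIFAR/ImageNet).
def make_model(hidden):
    return torch.nn.Sequential(torch.nn.Linear(32, hidden), torch.nn.ReLU(),
                               torch.nn.Linear(hidden, 10))

models = [make_model(h) for h in (16, 24, 32, 48, 64)]
optimizers = [torch.optim.SGD(m.parameters(), lr=1e-3) for m in models]

def iel_step(x):
    """One adaptation step on a single unlabeled sample x of shape (1, 32).

    The member with the highest softmax probability for the majority-voted class
    acts as the teacher; every other member minimizes cross-entropy against the
    teacher's detached soft output."""
    probs = [F.softmax(m(x), dim=-1) for m in models]
    votes = torch.stack([p.argmax(dim=-1) for p in probs])       # (n_models, 1)
    voted_class = votes.mode(dim=0).values                        # majority vote
    confidence = torch.stack([p[0, voted_class] for p in probs])  # prob. of voted class
    teacher = int(confidence.argmax())
    target = probs[teacher].detach()                              # soft pseudo-label

    for i, (model, opt) in enumerate(zip(models, optimizers)):
        if i == teacher:          # assumption: the teacher itself is left untouched
            continue
        opt.zero_grad()
        log_p = F.log_softmax(model(x), dim=-1)
        loss = -(target * log_p).sum(dim=-1).mean()               # cross-entropy to teacher
        loss.backward()
        opt.step()
    return teacher, int(voted_class)

# Adapt on a stream of single samples (random tensors as placeholders for images).
for _ in range(5):
    iel_step(torch.randn(1, 32))
```

This spells out the point several reviewers raise: because every student is pulled towards the same detached soft target, the step necessarily reduces ensemble diversity, while the teacher can change from sample to sample.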
4M0BRyGMnJ
Democratic Training Against Universal Adversarial Perturbations
main
Active
Neural network adversarial attack; Universal adversarial perturbation; Adversarial attack defense
alignment, fairness, safety, privacy, and societal considerations
5;5;6;6
4;4;2;5
2;3;3;3
2;3;2;3
2;3;3;3
5.5
3.75
2.75
2.5
2.75
-0.229416
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See weakness." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. This paper is well-written and easy-to-follow.\n2. The paper makes a commendable observation concerning the entropy spectrum in deep neural network layers, which is a significant contribution to the field and forms the basis for the proposed defense mechanism.\n3. The efficiency of the proposed democratic training method is noteworthy. It circumvents the need to generate UAPs during training, instead utilizing a limited number of epochs to identify low-entropy examples, which is a resourceful approach." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents an investigation into the defense against universal adversarial perturbations (UAPs), with a particular focus on targeted UAPs. The authors have made a notable observation regarding the entropy spectrum in hidden layers when UAPs are introduced, which serves as the cornerstone for their proposed 'democratic training' approach. This method aims to enhance the adversarial robustness of neural network models. The empirical results provided in the paper demonstrate the efficacy of the approach, as well as its ability to maintain the clean accuracy of the model.\n\nIn general, this paper is well-structured and presents a novel approach, which meets the basic criteria of ICLR conference. However, there are some aspects that could be improved or expanded upon to enhance the overall quality and impact of the paper." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The threat model employed in the experiments primarily utilizes gradient-based attack methods. These methods presuppose access to the model's parameters, aligning with white-box attack scenarios. This appears to be at odds with the assertion in Section 2.3 that adversarial knowledge does not extend to the internal parameters of the model. Clarification on this point would be beneficial.\n\n2. The comparison with adversarial training methods may require further refinement. Adversarial training aims to bolster adversarial accuracy by integrating adversarial examples with clean examples during training. Constraining the number of training epochs could result in an underfit model, which may not provide a fair benchmark. Additionally, it would be advantageous to include a comparison with the widely recognized TRADES method[1], which is absent from the current manuscript.\n\n3. The potential for adaptive attacks warrants consideration. If adversaries are aware of the defense strategy, they could tailor adversarial examples to bypass the defense. 
I know that in the threat model, no adaptive attacks are considered since the attackers do not know the internal details of the models. However, the chosen attack methods in the experiments inherently rely on gradient information. So I would suggest that the authors consider the potential for adaptive attacks.\n\n4. The scope of the experiments is largely limited to datasets comprising natural images. It would be beneficial to extend the evaluation to smaller-scale datasets, such as CIFAR-10, to complement the findings and potentially leverage open-source robust models for further exploration of the neuron entropy spectrum concept.\n\n5. While the paper discusses various existing defensive methods against UAPs and includes experimental comparisons, a direct comparison with state-of-the-art methods is missing. It is recommended to condense the background section and incorporate a more thorough comparison with leading-edge techniques.\n\n6. Minor Issues\n (1) Please consider reducing the margin between Figure 1 and the text.\n (2) Consider adding the necessary notation (SR) from the main text to Table 2 for better understanding.\n\n\n[1] Zhang, Hongyang, et al. \"Theoretically principled trade-off between robustness and accuracy.\" International conference on machine learning. PMLR, 2019." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "Yes, Privacy, security and safety" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. How does the method perform on ViT?\n2. What's the time cost of the method?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The use of entropy to reveal the dominance of UAPs and the concept of Democratic Training as a defense mechanism is innovative.\n2. The method was evaluated across various neural network architectures and benchmark datasets, which strengthens the claim of its general applicability.\n3. Unlike other defense methods, Democratic Training does not require architectural modifications, which makes it easy to integrate into existing systems." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a novel defense method called Democratic Training to mitigate the impact of Universal Adversarial Perturbations (UAPs) on deep neural networks. The authors observed that UAPs lead to an abnormal entropy spectrum in hidden layers, which shows the model's prediction is dominated by a small subset of features. Democratic Training mitigates this issue by increasing entropy to ensure model predictions rely on a wider range of features. The approach was evaluated on multiple models and datasets and was found to be effective in reducing attack success rates while preserving accuracy on clean data." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
The evaluation focused primarily on benchmark datasets and common UAP generation methods. It would be beneficial to see how this approach performs on more sophisticated and adaptive attacks, such as adversarial examples generated in dynamic environments.\n2. The proposed method mainly works well on CNNs. The authors should validate it on more types of networks, such as transformers.\n3. The method requires access to a small set of clean data for entropy measurement and training, which might not always be practical." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. For RQ3: did you adversarially train a model from scratch or just fine-tune a pretrained model with an adversarial training objective?\n2. I'm not very convinced by the claim in Lines 239-243 that says middle-layer feature entropy suggests the model's classification confidence." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The experiments are comprehensive.\n2. The proposed defense is attack-agnostic, which is more practical and efficient.\n3. The proposed defense largely reduced the targeted attack success rate.\n\nI tend to accept this paper. However, since I'm not familiar with UAP attack and defense baseline methods, I will listen to other reviewers and public comments and then decide." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "To improve neural network robustness to targeted UAPs, this paper proposes an adversarial training-like method that fine-tunes a pretrained model to reduce middle-layer entropy." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The UAP attacks evaluated in the paper were published in 2018, 2019, and 2020 and seem out-of-date.\n2. After democratic training, there is still a gap between ``AAcc.'' and clean accuracy. I wonder about the effectiveness of democratic training against non-targeted UAPs.\n3. Average results in Tables 4\\&5 are ambiguous since there can be a large bias among different networks." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "no" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1: The author obtained the idea of Democratic Training by studying the performance of UAP between different layers in network. I want to know why author said 'Democratic Training does not rely on generating UAPs and are thus not limited to specific UAP attacks.', As mentioned in the article, the UAP analyzed by the author is produced based on FA-UAP, so the properties analyzed should mainly for such kind of UAP, and the algorithm guided by this should also mainly target on FA-UAP. If it cannot be argued that all UAPs have such properties, then this statement seems unreasonable?\n\n2: The author did not provide a detailed explanation for $H(i)$ in algorithm 2 line 2, according to the equation (3), I think that the author is trying to say $H(i)$ is an entropy loss. Less strictly speaking, maximizing H(x) means that the various components of x are directly averaged as much as possible. So, is the goal of SG(SampleGenerator, Algorithm 2) equivalent to finding an adversarial sample with each component average? Is this a form of weakened PGD? If so, why is finding weaker adversarial samples beneficial for improving robustness? Why not directly target finding UAP for SG?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1, The setting is reasonable, author want to resist universal adversarial samples through small cost, this may be useful in some situation.\n2, Good experiment results." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The author aims to improve the robustness of universarial adversarial by fine-tuning with a small amount of data, mainly by performing entropy based data augmentation to suppress the influence of general adversarial perturbations in a given model. Then the experiment was presented." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1, The method provided in the article is not novel, overall, it is still based on the 'min-max' methods. And in my opinion, it seems to be a weakened version of adversarial training.\n\n2, The symbols are somewhat confused. For example, in euqation (4), author use $L_{cce}$, I think this means cross entropy loss, but author did not introduce what is $L_{cce}$ before the euqation (4); in algorithm line 3, author define $I_b^{en}$ which is got by SampleGenerator, but do not use it in the following of algorithm, or maybe it have been wirtten as $i_{en}$ in line 4?\n\n3, I try to test the author's algorithm on CIFAR10(VGG16, budget 8/255), but I didn't get such a good result shown in table2, the SR has only decreased by about 10%. Author did not submit the code, so I hope the author can provide a detailed introduction to the settings for Algorithms 1 and 2 (For example, how to select the epoch, learning rate, hyperparameters), and it's best to provide a detailed explanation of each symbol and steps in the algorithm." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "A novel method to mitigate the effect of UAPs via democratic training." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024democratic,\ntitle={Democratic Training Against Universal Adversarial Perturbations},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4M0BRyGMnJ},\nnote={under review}\n}" }, "abstract": { "value": "Despite their advances and success, real-world deep neural networks are known to be vulnerable to adversarial attacks. Universal adversarial perturbation, an input-agnostic attack, poses a serious threat for them to be deployed in security-sensitive systems. In this case, a single universal adversarial perturbation deceives the model on a range of clean inputs without requiring input-specific optimization, which makes it particularly threatening. In this work, we observe that universal adversarial perturbations usually lead to abnormal entropy spectrum in hidden layers, which suggests that the prediction is dominated by a small number of ``feature'' in such cases (rather than democratically by many features). Inspired by this, we propose an efficient yet effective defense method for mitigating UAPs called Democratic Training by performing entropy-based model enhancement to suppress the effect of the universal adversarial perturbations in a given model. \\emph{Democratic Training} is evaluated with 6 neural networks trained on 4 benchmark datasets and 4 types of state-of-the-art universal adversarial attack methods. The results show that it effectively reduces the attack success rate, improves model robustness and preserves the model accuracy on clean samples." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Neural network adversarial attack; Universal adversarial perturbation; Adversarial attack defense" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/e095a7d7577f3fbd41df74b2a759cc1c02fe072d.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "Democratic Training Against Universal Adversarial Perturbations" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4MWUdp6deL
Learning Code Preference via Synthetic Evolution
main
Active
Code Generation;Large Language Model;Preference Learning;Evaluation
foundation or frontier models, including LLMs
5;5;5;6
5;4;4;3
2;2;2;2
2;2;3;3
4;3;4;3
5.25
4
2
2.5
3.5
-0.816497
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "Q1: what are the backgrounds of the developers participating in the evaluation and data annotation, and how their potential biases may have affected the soundness of the approach and the evaluation? \n\nQ2: the larger LLMs have clear edges over the CodFavor improved small models with much lower costs than human evaluators. Why would they not be a better option than using CodeFavor to improve the smaller models?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper presents a significant advancement in the field of code preference learning by introducing the CODEFAVOR framework, which innovatively utilizes synthetic evolution data to train models that predict meaningful code preferences. The novelty lies in its dual focus on aligning human and model preferences with verifiable code properties, addressing a critical gap in existing research. Key contributions include the development of CODEPREFBENCH, a comprehensive benchmark with 1364 curated tasks that evaluate code based on correctness, efficiency, and security, thus providing a robust evaluation framework for future studies. The results demonstrate that CODEFAVOR can enhance model accuracy by up to 28.8% while being more cost-effective than larger models, highlighting its practical significance in improving code generation assessments. Additionally, the paper sheds light on the limitations of human-based assessments, emphasizing the need for model-based approaches in evaluating non-functional code properties, which further underscores the importance of the research findings." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper addresses the challenge of assessing code generation based on well-formed properties and aligning it with developer preferences, which has proven difficult in the context of Large Language Models (LLMs). To tackle this issue, the authors propose CODEFAVOR, a framework designed to train pairwise code preference models using synthetic evolution data, including code commits and critiques. Additionally, they introduce CODEPREFBENCH, a benchmark consisting of 1364 curated code preference tasks that evaluate three key properties: correctness, efficiency, and security, alongside human preferences. The main results indicate that CODEFAVOR significantly enhances the accuracy of model-based code preferences by up to 28.8%, while also demonstrating that these models can perform comparably to those with 6 to 9 times more parameters, all while being 34 times more cost-effective. Furthermore, the study highlights the limitations of human-based code preference assessments, revealing that a substantial percentage of tasks remain unsolved despite considerable time investment." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The problem formulation/setting can be improved in terms of clarity, motivation, and realism. The framework is proposed to serve code assessment purposes, i.e., judging automatically which version of code generated by a model from a prompt is preferred (i.e. more correct/secure/efficient) between a pair of two versions. The questions are (1) in what scenarios would these two versions be available, and (2) how realistic it is that there are such strong and discriminative contrasts between the two versions (i.e., correct versus wrong, fast versus slow, secure versus vulnerable). In a typical use scenario of LLMs for code generation, developers may feed the LLM with a prompt and get a response. Should they always ask the model for two responses? If so, the cost would double. More importantly, it is probably unlikely that there is a such contrast between the two versions of code generated from the same prompt---e.g., the two versions could be similarly good or bad. Learning preferences in a strongly differentiable pair of two responses does not seem to be realistic. If so, the paper may want to provide supporting evidence that this is the case. Or the problem itself is not motivated convincingly. \n\nAnother primary weakness is the heavy reliance on synthetic evolution data for training the CODEFAVOR framework. While synthetic data can be useful, it may not fully capture the complexities and nuances of real-world coding scenarios. This limitation raises concerns about the generalizability of the model's performance in practical applications, as the evaluation may not reflect actual developer preferences or code behavior in diverse environments.\n\nThe paper acknowledges the prohibitive costs and limitations of human-based code preference assessments, noting that despite significant time investment, a substantial percentage of tasks remain unsolved (15.1% to 40.3%) . This suggests that human evaluators may struggle with certain tasks, which could undermine the reliability of the human preference data used for comparison. The paper could benefit from a more in-depth exploration of these limitations and their implications for the overall findings.\n\nThe paper mentions that the reliability of using large language models (LLMs) as evaluators often hinges on their reasoning capabilities, which can be subject to inherent biases . This raises questions about the objectivity of the model-based preferences derived from LLMs, as biases could skew the results and affect the alignment with human preferences. A more thorough examination of potential biases and their impact on the findings would strengthen the paper's arguments.\n\nThe evaluation framework, CODEPREFBENCH, focuses on three specific properties: correctness, efficiency, and security. While these are important aspects, the paper does not justify the choices, among various other quality aspects of code, such as maintainability or readability. Also, it seems that each of these chosen properties is separately considered, yet in real-world scenarios developers need to balance multiple factors at the same time when choosing which code to adopt (e.g., code that is both secure and correct). The interplay among these potentially competing factors is not considered in the approach nor in the evaluation." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "(1) In Table 1, to validate the effectiveness of the developed training framework, might it be helpful to add some baseline training approaches which also train the LLMs using the same training data used by CODEFAVOR?\n\n(2) In Table 1, might it be helpful to understand whether CODEFAVOR can further improve these Open-Weight Models with larger sizes?\n\n(3) Would the developed approach also be effective, if the developed approach is applied to some other LLMs for code related tasks (e.g., Code Llama, Lemur)?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "(1) The paper is well written and easy to follow.\n\n(2) The paper introduces a benchmark which can potentially be used by future papers.\n\n(3) The developed approach is evaluated using multiple LLMs, showing that the developed approach is generally effective.\n\n(4) The developed approach has good intuitions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes CODEFAVOR, a framework for training pairwise code preference models from synthetic evolution data, including code commits and code critiques. To evaluate code preferences, the paper introduces CODEPREFBENCH, a benchmark comprising 1364 rigorously curated code preference tasks to cover three verifiable properties - correctness, efficiency, and security - along with human preference. The evaluation shows that CODEFAVOR holistically improves the accuracy of model-based code preferences." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "(1) In Table 1, it seems that the approaches in the rows are either LLMs, or LLMs with the training framework developed in this paper. To validate the effectiveness of the developed training framework, might it be helpful to add some baseline training approaches which also train the LLMs using the same training data used by CODEFAVOR?\n\n(2) In Table 1, considering that there is still a gap between the Open-Weight Models and Our Models and Baselines (i.e., LLMs used with CODEFAVOR), might it be helpful to understand whether CODEFAVOR can further improve these Open-Weight Models with larger sizes?\n\n(3) It might be helpful if the paper can show the effectiveness of the developed approach when the approach is applied to some other LLMs for code related tasks (e.g., Code Llama, Lemur)." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Do you have concerns that the synthetic data generation methods, Commit-Instruct and Critic-Evol, may not fully ensure code correctness? If not, this raises another question: the quality of synthetic data is highly dependent on the LLM used to generate it. How do the experiments demonstrate that CODEFAVOR’s performance gains are due to the framework itself rather than simply distilling knowledge from a stronger LLM (Llama-3-70B-Instruct)?\n- Could you provide more details on the inference process for the evaluation results in Table 3? Specifically, how many samples were created for each problem, what temperature was used, and are the results statistically significant?\n- Could you elaborate on any aspects that emphasize the novelty of your work compared to previous studies?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "* This paper contributes two code preference synthetic dataset and a CODEPREFBENCH, a collection of 1,364 carefully curated preference tasks, To evaluate code preferences labeled by various approaches.\n* This paper comprehensively quantify and conduct case studies on code preferences derived from human developers and LLMs.\n* CODEFAVOR models can match the preference accuracy of models that are larger by 6∼9×, while being cheaper by 34×" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes CODEFAVOR, a framework for training pairwise code preference models using synthetic evolution data generated from code commits and LLM critiques. This approach addresses the challenge of aligning code generation with developer preferences, focusing on correctness, efficiency, and security through a benchmark called CODEPREFBENCH, which includes 1,364 preference tasks. CODEFAVOR models achieve comparable performance to much larger models while being more cost-effective, and experiments reveal that human preferences often fall short in non-functional objectives like efficiency and security. The study provides insights into balancing model and human preferences, highlighting the potential limitations and strengths of each approach​." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The approach to synthetic data generation lacks originality, as creating datasets from git commits [1,6] and evolving from sampled code[2,3] are common practices in the field.\n- The pairwise modeling approach is also not particularly novel; using pairwise prompts, criterion-based prompting, and classification or generation labels [4,5,7] have been previously explored in other studies.\n- Additionally, there is concern that synthetic data generation may not fully ensure code correctness, as it heavily depends on the LLM used for critique and generation. The chosen model, Llama3-70B-Instruct, is relatively weak compared to state-of-the-art models and limited to only this single model.\n- Finally, it is challenging to determine whether the performance gains following CODEFAVOR training are due to the distillation of knowledge from stronger LLMs used in data generation or from the CODEFAVOR training itself.\n\n\n1. Jimenez, Carlos E., et al. \"Swe-bench: Can language models resolve real-world github issues?.\" arXiv preprint arXiv:2310.06770 (2023).\n2. Luo, Ziyang, et al. \"Wizardcoder: Empowering code large language models with evol-instruct.\" arXiv preprint arXiv:2306.08568 (2023).\n3. Wei, Yuxiang, et al. \"Magicoder: Empowering code generation with oss-instruct.\" Forty-first International Conference on Machine Learning. 2024.\n4. Dong, Yi, et al. \"Steerlm: Attribute conditioned sft as an (user-steerable) alternative to rlhf.\" arXiv preprint arXiv:2310.05344 (2023).\n5. Wang, Zhilin, et al. \"Helpsteer: Multi-attribute helpfulness dataset for steerlm.\" arXiv preprint arXiv:2311.09528 (2023).\n6. Ding, Yangruibo, et al. \"Crosscodeeval: A diverse and multilingual benchmark for cross-file code completion.\" Advances in Neural Information Processing Systems 36 (2024).\n7. Qin, Zhen, et al. \"Large Language Models are Effective Text Rankers with Pairwise Ranking Prompting.\" Findings of the Association for Computational Linguistics: NAACL 2024. 2024." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. The Security score of human developers in Table 3 is only 59.7. Does this indicate that humans are not proficient at judging code security, even similar to random selection? \n\n2. Could you further explain “Scores within 1 percentage point of the highest” in Table 3, as well as the detailed measurement method for \"uncertain responses\"?\n\n3. The authors discovered that code comments may negatively affect model preferences, which is a bit strange and may be harmful to real-world applications. Is it possible to result from the class imbalance in comments (e.g., a higher proportion of comments in positive examples)? Could you provide the number of comments in positive and negative examples in the training and testing sets?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The synthetic dataset construction method (commits and critiques) is technically sound and novel to me.\n\n- The authors conducted a comprehensive evaluation of the method. In addition to correctness, which is the focus of many traditional code generation studies, the authors also assess efficiency, security, and human developer preference.\n\n- The authors put significant effort into the formatting of images and tables, which enhances the readability of the paper." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper aims to enable LLMs to better assess the quality of two code snippets by constructing a synthetic pairwise code preference dataset. The dataset is built using code commits (with pre- and post-commit versions as contrastive samples) and code critiques (the code snippet improved by a superior LLM as a contrastive sample). The authors have built benchmarks in Correctness, Efficiency, Security, and Human Preference to test the effectiveness of the proposed method." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. My main concern is that the authors overlook many works on code ranking and do not provide any experimental comparison. Many statements, such as \"learning code preferences has been *largely under-explored*\", \"the *first* open recipe to train pairwise code preference models\", and \"understudied code generation domain\", appear to overclaim. To name a few: \n\n- A basic, training-free baseline is to compare the mean log probability of two code snippets and select the one with the highest probability, as in [1]. Furthermore, [2] also uses model likelihood for code selection. \n- Some research also explores training classifiers to choose the better code, as in [3].\n- The authors did not compare their work with the dataset in [4] mentioned in the Related Work section.\n- In addition, but less importantly, to better contextualize the paper, some words about recent advances in execution-based code selection [5,6,7,8] would be appreciated. Particularly, [8] also employs a generation task similar to this paper. Considering that the work is recent, this comparison is not necessarily required.\n\nSince the authors only reported the performance of the backbone LLMs and lacked empirical comparisons with advanced code selection methods, it is difficult to determine the relative improvement level of this work within related studies.\n\n2. Some training details in the paper require further clarification. For instance, does the classifier task operate on the next token in Equation (1)? If so, considering that the label tokens (\"A\" or \"B\") and the criterion $c$ are tightly connected without a delimiter or explicit prompt, how does the LLM recognize where the criterion ends to output label tokens?\n\n3. Since the authors collected both the training set and testing benchmarks, it's unclear whether they took decontamination steps to prevent test set leakage. 
If no decontamination was performed, analyzing the potential overlap between the training and test sets would be beneficial.\n\n**Minor comments**\n\n- The caption for Listing 1 is missing a period at the end.\n- It would be better to place Equation (2) right after line 141.\n\n**References**\n\n[1] Evaluating Large Language Models Trained on Code, https://arxiv.org/abs/2107.03374\n\n[2] Coder Reviewer Reranking for Code Generation, ICML 2023.\n\n[3] Fault-Aware Neural Code Rankers, NeurIPS 2022.\n\n[4] CodeUltraFeedback: An LLM-as-a-Judge Dataset for Aligning Large Language Models to Coding Preferences.\n\n[5] CodeT: Code Generation with Generated Tests, ICLR 2023.\n\n[6] Natural Language to Code Translation with Execution, EMNLP 2022.\n\n[7] B4: Towards Optimal Assessment of Plausible Code Solutions with Plausible Tests, ASE 2024.\n\n[8] Sifting through the Chaff: On Utilizing Execution Feedback for Ranking the Generated Code Candidates, ASE 2024." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We train models and build benchmarks to predict code preference towards verifiable properties and human preference." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024learning,\ntitle={Learning Code Preference via Synthetic Evolution},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4MWUdp6deL},\nnote={under review}\n}" }, "abstract": { "value": "Large Language Models (LLMs) have recently demonstrated remarkable coding capabilities.\nHowever, assessing code generation based on well-formed properties and aligning it with developer preferences remains challenging.\nIn this paper, we explore two key questions under the new challenge of code preference learning:\n(i) How do we train models to predict meaningful preferences for code? and\n(ii) How do human and LLM preferences align with verifiable code properties and developer code tastes?\nTo this end, we propose CodeFavor,\na framework for training pairwise code preference models from synthetic evolution data,\nincluding code commits and code critiques.\nTo evaluate code preferences,\nwe introduce CodePrefBench, a benchmark comprising 1364 rigorously curated code preference tasks to cover three verifiable properties—correctness, efficiency, and security—along with human preference.\nOur evaluation shows that CodeFavor holistically improves the accuracy of model-based code preferences by up to $28.8$%.\nMeanwhile, CodeFavor models can match the performance of models with $6\\sim 9\\times$ more parameters\nwhile being $34\\times$ more cost-effective.\nWe also rigorously validate the design choices in CodeFavor via a comprehensive set of controlled experiments.\nFurthermore, we discover the prohibitive costs and limitations of human-based code preference:\ndespite spending 23.4 person-minutes on each task, $15.1\\sim 40.3$% of tasks remain unsolved.\nCompared to model-based preference,\nhuman preference tends to be more accurate under the objective of code correctness,\nwhile being sub-optimal for non-functional objectives." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Code Generation", "Large Language Model", "Preference Learning", "Evaluation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/565586e8dffe117bed8212374deddd4773516d99.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Learning Code Preference via Synthetic Evolution" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4NRjdISWby
LoCA: Location-Aware Cosine Adaptation for Parameter-Efficient Fine-Tuning
main
Active
Parameter-efficient fine-tuning;discrete cosine transform;transfer learning;adaptation
transfer learning, meta learning, and lifelong learning
5;5;5;6;6;6
5;2;3;2;4;3
2;2;2;3;3;3
2;2;2;3;3;2
2;2;3;3;2;2
5.5
3.166667
2.5
2.333333
2.333333
-0.156174
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please see the weaknesses above. A few additional questions are below:\n\n* Could a discussion on certain edge cases where the theory does not hold be provided? More precisely, would it be possible to find situations where the assumptions made in theory do not hold well, resulting in a breakdown of expected results?\n\n* Additionally, could results for ViT be provided in situations where the number of parameters are the same as parameters in FourierFT, for a more intuitive comparison?\n\n* I would also like to see results in the Natural Language Generation task - particularly for the GPT-2 training performed by FourierFT, as it would indicate the effectiveness of the method when ported to a Conv1D based implementation of the MLP." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "+ The paper takes an existing idea - learning PEFT parameters in the frequency domain and reconstructing the weight matrix from those learned parameters - and performs an in-depth analysis of the approach. Based on the obtained insights, it presents a new method that is theoretically better than existing approaches in approximating the ideal fine-tuning matrix and shows quantitative improvements on the base method, FourierFT, in most cases. \n+ The paper includes intuitive theoretical statements, backed by mathematical proofs, which is good to see in a PEFT paper since existing methods often lack theoretical insights and are heuristic-based. \n+ The paper is also well written and intuitive to follow, with rigorous experiments. \n+ I especially liked the approach presented for learning discrete locations to be optimized, which I believe is a novel contribution." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a novel low-rank adaptation approach for fine-tuning transformer-based pretrained models by deriving weights from parameters learned in the frequency domain using the iDCT operation. Compared to the similar existing method FourierFT, the approach, in theory, promises better reconstruction of the oracle update matrix. Empirically, the results do improve over the baseline FourierFT approach in most cases, indicating effectiveness of the approach. The paper also provides a method to learn locations in the frequency domain where coefficients are required, which is novel and interesting in the context of PEFT." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper starts with some terminology that are not elaborated: “optimization space” and “flexible optimization”. These terms are not defined precisely anywhere, nor is their link to the theory or empirical results clear. 
It would be better to ground the explanation in well-defined terms that are used in the analysis.\n\n- According to the reference (Yohai & Maronna, 1979), the initial assumption, the equation in Sec 2, L99, is true only under certain conditions. \nThis variant of the equation holds true only when $\\psi$ is monotone and X has full rank. However, this issue is not addressed anywhere in the paper. For example, according to the paper, the matrix X is the input matrix. In practical implementations, X has dimension m*n where m<n, i.e., for X to have full (row) rank, we need rank(X) = m. This is not stated or supported in the paper.\n\n- The theory presented is highly domain-specific: it does not translate to more general PEFT methods such as VeRA or DoRA, and requires significant theoretical adaptations to allow for comparisons with arbitrary low-rank methods. \n\n- The theory also does not *always* agree with practice - there are certain cases where LoRA and FourierFT perform better. This indicates some confounding factors, yet no discussion on this has been included. I do not see this as a reason to reject however, as this is commonly seen in this area, but would appreciate a discussion on the same.\n\n- In the case of ViT, I would have liked to see comparisons of the proposed approach with FourierFT having the same number of trainable parameters, as done for NLU and IFT tasks." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Why are the accuracy rates of the baseline methods on the Stanford Cars and FGVC datasets more than 5% higher than those reported in related papers? I mainly compared the experimental results from the FourierFT paper (https://arxiv.org/pdf/2405.03003) and yours, and found that the differences are small on other datasets, but the results on the Stanford Cars and FGVC datasets are significantly beyond normal error margins. I am unsure whether this is due to errors caused by carelessness in the experimental process, or if you used different ViT models compared to theirs. Specifically, the experimental results on the Stanford Cars and FGVC datasets are emphasized in your work, and it is crucial to ensure the precision of these results.\n\n2. Why are there so few ablation experiments for FourierFT fine-tuning on ViT? As the most competitive counterpart, additional experimental results for FourierFT 239K and FourierFT 480K after fine-tuning on ViT could be included. After all, LoCA presents results for two different parameter budgets, while FourierFT only provides results for the smallest parameter budget for comparison, which does not meet the fairness requirements of an ablation study.\n\n3. What are the differences between LoCA and other methods in terms of Memory Cost and Training Time?
You may use charts to illustrate these differences explicitly.\n\n4. Why does LoCA not show a significant advantage over FourierFT when fine-tuning various foundation models such as RoBERTa, LLaMA, and ViT, in terms of reducing parameter budget and improving accuracy? Does this suggest that, while your work is strongly interpretable, it may have limited practical value?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. As emphasized by the authors, their iDFT-based variants have managed to surpass the expressivity of previous low-rank-based methods.\n\n2. Overall, the presentation is clear, supported by rigorous mathematical derivations and extensive experimental results." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces Location-aware Cosine Adaptation (LoCA), a novel frequency-domain parameter-efficient fine-tuning method for pre-trained LLMs. By leveraging the inverse Discrete Cosine Transform (iDCT) as well as selectively learning components in the frequency domain, LoCA addresses the constraints of the naive low-rank adaptation (LoRA) method. \nIn short, LoCA enhances expressiveness while maintaining computational efficiency." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Some baseline experimental results differ significantly from those in related papers, which may indicate carelessness in the experimental process. Also, more ablation experiments are needed to increase confidence.\n\n2. For most datasets, LoCA doesn't show a clear advantage over FourierFT in terms of reducing parameter budget and improving accuracy.\n\nPlease see the questions section for more details." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See weakness." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper provides a rigorous theoretical comparison between frequency-domain and low-rank adaptation methods, filling a gap in the literature on expressivity and optimization constraints.\n2. LoCA’s use of iDCT with dynamic selection of frequency components represents a creative improvement over conventional low-rank methods, particularly for parameter efficiency." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a novel parameter-efficient fine-tuning method, Location-Aware Cosine Adaptation (LoCA), that leverages inverse Discrete Cosine Transform (iDCT) for selectively optimizing frequency-domain components in pre-trained language and vision models.
LoCA aims to surpass traditional low-rank adaptations by dynamically choosing informative frequency components, thus balancing parameter efficiency and performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Overall it's a good paper, and I will raise my score if the authors address my concerns.\n1. LoCA introduces a computationally complex process with alternating optimization steps and central difference approximation, which could pose practical challenges.\n2. How does LoCA handle potential noise in frequency component selection, and are there measures in place to stabilize the optimization process?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. The paper primarily compares LoCA with LoRA-related fine-tuning techniques. Has consideration been given to performance comparisons with other fine-tuning methods such as prompt learning and adapter tuning?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. LoCA introduces a novel approach for parameter-efficient fine-tuning in the frequency domain through the inverse Discrete Cosine Transform (iDCT) and selective learning of frequency components. This method demonstrates the potential to surpass traditional low-rank decomposition techniques both theoretically and empirically, which is of significant value for resource-constrained environments and the deployment of large-scale models. Furthermore, your work provides a comprehensive theoretical analysis comparing frequency domain methods with low-rank decomposition approaches, which is meaningful.\n2. The methodology section of the paper is rigorous, and the experiments cover multiple domains, including natural language processing and computer vision. The paper offers comparisons with existing techniques, such as LoRA and FourierFT, which help readers understand the performance and efficiency of LoCA. Additionally, the in-depth theoretical analysis provides a solid foundation for frequency domain parameter-efficient fine-tuning methods.\n3. The writing of the paper is clear and logically structured, with a coherent flow from the introduction to the methodology, experimental results, and conclusions. In particular, the detailed explanation of how LoCA operates, including the application of the inverse Discrete Cosine Transform and the alternating optimization strategy, enhances the reader's understanding of the relevant work." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper titled \"LoCA: Location-Aware Cosine Adaptation for Parameter-Efficient Fine-Tuning\" introduces a novel parameter-efficient fine-tuning (PEFT) method called Location-Aware Cosine Adaptation (LoCA).
This method is designed to adapt pre-trained large language models (LLMs) to downstream tasks with improved optimization flexibility and parameter efficiency. LoCA is based on the inverse Discrete Cosine Transform (iDCT) and selectively tunes learnable components at specific locations" }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper contains a limited amount of content related to RELATED WORK, with insufficient coverage of the existing literature in the field.\n2. While the experimental results are convincing, the paper could further expand the experimental section to include the verification of LoCA's performance on more datasets. Additionally, a more in-depth analysis of LoCA's performance on different model scales and tasks of varying complexity would help to further demonstrate its applicability and robustness." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "I will keep my positive score if the authors address Question 1. Other questions require much more experiment time and are quite minor to improve the paper.\n\n1. MT-bench is considered an unstable benchmark. It is strongly recommended that the authors utilize the MathInstruct Dataset instead, which is more stable and generally requires a higher level of expressive power.\n\n2. For fine-tuning Roberta, typical benchmarks include RTE, BoolQ, SST-2, WSC, WIC, MultiRC, SQuAD, CB, COPA, DROP, GSM8K, and ReCoRD. Could the authors consider adding any benchmarks that are currently missing?\n\n3. COLA, ReLoRA, and DoRA represent typical LoRA variants. It would be beneficial if the authors could include any of these variants that are not already covered.\n\n4. In Figure 3, it appears that the performance gain may continue to increase with a larger value of 'r.' Could the authors extend the range of 'r' to determine the optimal value that yields the best performance?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The concept of applying low-rank adaptation within the Fourier domain is intriguing, and it implicitly suggests a method of tuning that utilizes all available parameters.\n\n2. The theoretical results appear to be novel and have captured the interest of the reviewer.\n\n3. The proposed method delivers strong performance benefits while maintaining an exceptionally low parameter cost." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces Location-aware Cosine Adaptation (LoCA), a novel method for fine-tuning large language models (LLMs) and vision models in a parameter-efficient manner. LoCA is based on the inverse Discrete Cosine Transform (iDCT) and optimizes the locations of learnable frequency components. 
It addresses limitations of previous low-rank adaptation methods by providing greater optimization flexibility and expressivity. Theoretical analysis and empirical observations support the superiority of LoCA over traditional low-rank methods and iDFT-based methods. LoCA dynamically selects the most informative frequency components during training, leading to enhanced parameter efficiency and computational feasibility. The method demonstrates state-of-the-art performance on diverse language and vision tasks with fewer parameters. The introduction of the paper contextualizes the need for parameter-efficient fine-tuning methods due to the prohibitive costs of fully fine-tuning increasingly large models, and LoCA is presented as a solution that maintains performance while reducing trainable parameters." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The reviewer, not being an expert in this area, has not identified any major weaknesses. However, with some background in empirically tuning LLMs and ViTs, the reviewer would like to inquire further about the experimental setup.\n\n1. Some benchmarks and baselines are lacking. \n\n2. Common advantages of PEFT methods include reduced computation and memory costs. The paper's contribution would be strengthened if the authors included these aspects in their analysis." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "● Q1: LoRA may not be the most parameter-efficient approach among spatial-domain PEFT methods. The work by Kopiczko et al. (2023) in \"VeRA: Vector-based Random Matrix Adaptation\" (arXiv:2310.11454) demonstrates a more parameter-efficient and lightweight alternative, focusing on diagonal matrix adaptations to achieve efficient adaptation without the need for frequency-based transformations. Could the authors clarify whether their proof and theoretical framework apply to VeRA? Additionally, this paper lacks a comparative analysis of VeRA, both theoretically and experimentally. Would the established proof also support an evaluation of DoRA’s expressivity?\n\n● Q2: For each layer in LoCA, both frequency component locations and coefficients are optimized individually. This approach appears to introduce a higher number of parameters compared to FourierFT, which selects $n$ locations randomly and shares these locations across all layers. Specifically, FourierFT’s parameter count is:\n\n$2n + n \\times L = n(L + 2)$\n\nwhere $L$ represents the number of layers in the pre-trained model.\n\nIn contrast, LoCA introduces $2n$ parameters for each layer’s locations and $n$ for each layer’s coefficients, resulting in a total parameter count of:\n\n$3n \\times L$\n\nThis yields a parameter ratio between LoCA and FourierFT of:\n\n$\\frac{3L}{L + 2}$\n\n\nFor example, with LLaMA-2 7B where $L = 32$, LoCA’s parameter count is approximately 2.82 times that of FourierFT.
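A quick numerical check of the ratio derived in Q2, taking the reviewer's counting convention as given (whether frequency locations should be counted as trainable parameters is itself part of that assumption):

```python
# Sanity check of the reviewer's parameter-count comparison (Q2); the counting
# convention is the reviewer's, not verified against the paper.
def fourierft_params(n: int, L: int) -> int:
    return 2 * n + n * L      # n shared 2-D locations + n coefficients per layer

def loca_params(n: int, L: int) -> int:
    return 3 * n * L          # per layer: 2n location coordinates + n coefficients

n, L = 1000, 32               # L = 32 layers as in LLaMA-2 7B; n is arbitrary here
print(round(loca_params(n, L) / fourierft_params(n, L), 2))  # 3L/(L+2) = 96/34 ≈ 2.82
```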
This raises concerns about parameter efficiency, especially in large models. To clarify whether the additional parameters in LoCA yield proportional benefits, could the authors provide empirical comparisons across various model sizes and tasks, measuring both fine-tuning performance and resource usage (e.g., memory and compute requirements)? Specific metrics, such as performance improvements relative to parameter increase and scaling efficiency on different benchmarks, would help assess whether gains in expressivity or accuracy justify the increased parameter cost.\n\n● Q3: In lines 191-199, the authors claim that randomly selecting frequencies for FourierFT yields the lowest expressivity, performing worse than LoRA; however, this claim lacks experimental support. For instance, Figure 3 in this paper shows mixed results for FourierFT on the FGVC task, whereas Figure 4 in Gao et al. (2024), *\"Parameter-Efficient Fine-Tuning with Discrete Fourier Transform\"* (arXiv:2405.03003), presents contrasting findings, particularly on the QQP task in GLUE. In Gao et al., FourierFT consistently outperforms LoRA across GLUE benchmarks, achieving higher accuracy with minimal random spectrum updates and fixed locations across layers. Furthermore, Section C.2 on Expressive Ability in the FourierFT paper’s supplementary material reinforces FourierFT’s superior expressivity over LoRA. Could the authors provide empirical comparisons to clarify these discrepancies, ideally across multiple model sizes and tasks, with metrics on fine-tuning performance and resource usage (e.g., memory and computational requirements)? Demonstrating whether the increased parameter count in LoCA yields proportional performance benefits would strengthen the case for its efficiency.\n\n● Q4: Additionally, the paper lacks empirical evaluations comparing selective FourierFT and LoCA, which would be valuable in validating the theoretical claims. For instance, in line 191, the statement that $W_F(3)$ can outperform $W_F(2)$ would benefit from empirical results to illustrate how these specific configurations impact performance. Further analyses using different selection strategies within FourierFT would also help substantiate the expressivity claims and clarify the mixed findings observed.\n\n● Q5: The proof assumes asymptotic normality of incremental weight updates, enabling statistical analysis of expressivity via the Central Limit Theorem and empirical spectral density. However, in LoRA, only a subset of weights is updated through low-rank reparameterization, while frequency-based methods like LoCA further restrict updates to high-amplitude frequency components. Given that these updates are gradient-driven and thus correlated, the i.i.d. assumption essential for CLT may not strictly hold. With limited, targeted updates, the cumulative effect lacks the \"sum of many independent adjustments\" necessary to ensure asymptotic normality. Could the authors provide further justification for assuming convergence to normality under selective updating, and clarify how potential deviations from i.i.d. behavior may impact expressivity comparisons? It would be helpful if the authors could conduct specific analyses or empirical tests, such as quantifying deviations from normality in the weight updates or performing sensitivity analyses to assess the impact of non-normality on expressivity.
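One way to run the normality check suggested at the end of Q5, sketched under the assumption that the fine-tuning update matrix is available as a dense array; the heavy-tailed placeholder below merely stands in for a real update:

```python
# Sketch of quantifying deviations from normality in weight updates (Q5).
# `delta_w` is a synthetic stand-in for an actual fine-tuning update matrix.
import numpy as np
from scipy import stats

rng = np.random.default_rng(0)
delta_w = rng.standard_t(df=5, size=(768, 768))   # placeholder heavy-tailed update

flat = delta_w.ravel()
flat = (flat - flat.mean()) / flat.std()

ks_stat, _ = stats.kstest(flat, "norm")           # distance to a standard normal
print(f"KS distance to N(0,1): {ks_stat:.4f}")
print(f"Excess kurtosis: {stats.kurtosis(flat):.4f}")  # 0 for a Gaussian
```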
\n\n● Q6: In the alternating optimization strategy, the method first optimizes the coefficients of selected frequency components $\\alpha$, before refining their locations for $B_a$ steps. Then, with $\\alpha$ fixed, it optimizes the locations $l$ for $B_l$ steps, and finally, the procedure fixes the locations and optimizes only the coefficients $\\alpha$ until convergence.\n\nCould the authors clarify the rationale behind this specific order of coefficient-first optimization and its impact on stability and convergence? While this separate optimization approach might simplify the process, it may not fully capture the interactions between coefficients and locations, potentially limiting optimality. Have the authors explored an alternative order—optimizing locations first and then coefficients—and could they provide insights on how this might affect convergence and final performance?\n\nIn the ablation study (lines 480-485), the authors present several variant comparisons, yet they do not include an analysis of this alternative pipeline. Additionally, how are the parameters $B_l$ and $B_s$ selected—is their choice task-specific? From Table 4, it appears that the V5 variant achieves relatively better results, but this is not consistent with the description of the alternative policy in lines 284-292 and the algorithm in lines 945-963. Could the authors clarify these inconsistencies and provide further justification for the selected optimization order and parameter settings? \n\n\n● Q7: Could the authors clarify how LoCA ensures stable convergence given the dynamic selection of specific magnitude(e.g., high-magnitude) frequency components in $\\Delta W$ across epochs? Specifically, as top-ranked frequencies may shift due to gradient changes, how does LoCA maintain consistency in updates to avoid potential instability in the training process? Additionally, could the authors explain how the specific frequency components selected in LoCA—whether high or low frequencies—consistently contribute to model performance across tasks? Is there a risk that focusing solely on high-magnitude components could lead to loss of task-relevant information in lower-magnitude frequencies, which may carry finer-grained details?\n\n\n● Q8: Could the authors clarify the computational and memory overhead associated with estimating location gradients using finite-difference approximation? Specifically, does this approach increase CUDA memory requirements significantly, and if so, how does it impact the overall efficiency of LoCA? Additionally, an analysis of the trade-offs between accuracy and resource usage in this approximation method would be valuable to understand its practical feasibility." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper introduces LoCA, a frequency-based approach to parameter-efficient fine-tuning that selectively optimizes specific frequency components using the Discrete Cosine Transform. By focusing on significant frequencies within weight matrices, LoCA aims to reduce the parameter count needed for fine-tuning while maintaining model expressivity. This selective frequency adaptation presents a practical alternative to spatial-domain methods like LoRA, providing a new angle on efficient model tuning. 
The paper’s theoretical framework, including empirical spectral density and the Central Limit Theorem to analyze expressivity, helps ground LoCA's approach in established statistical methods, adding quality to the work." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes LoCA, a method for fine-tuning pre-trained models using frequency-domain adaptation via the inverse Discrete Cosine Transform (iDCT). LoCA focuses on selecting key frequency components to improve the expressivity and efficiency of model adaptation. The theoretical analysis argues that iDCT-based adaptation can match or exceed the effectiveness of low-rank methods. However, the empirical gains over existing methods like LoRA are marginal, especially in vision tasks. LoCA’s added complexity, due to finite-difference approximations and alternating optimization, may not be fully justified by these modest improvements, potentially limiting its practical appeal." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "● This paper appears to provide inadequate empirical support for its theoretical claims. A central claim of the paper is that randomly selected frequencies for FourierFT yield lower expressivity than LoRA; however, this claim lacks direct experimental validation, which is critical to substantiate the theoretical conclusions. For instance, Figure 3 shows mixed results for FourierFT on the FGVC task, while Figure 4 in *Parameter-Efficient Fine-Tuning with Discrete Fourier Transform* by Gao et al. (2024) (arXiv:2405.03003) presents empirical evidence that contradicts this claim by showing that FourierFT achieves higher accuracy than LoRA across multiple GLUE benchmarks. Additionally, Section C.2 on Expressive Ability in the FourierFT paper’s supplementary material further supports FourierFT’s superior expressivity. The paper also lacks empirical evaluations for selective FourierFT and LoCA, which would further validate the claims made.\n\n● The paper omits a comparison with a highly representative spatial-domain PEFT method, VeRA, which focuses on lightweight adaptations in the spatial domain and would serve as a useful benchmark for LoCA's performance.\n\n● The design of LoCA introduces significant parameter overhead due to the individual optimization of frequency component locations and coefficients for each layer. For example, in a model with \\( L = 32 \\) layers (e.g., LLaMA-2 7B), LoCA’s parameter count is approximately 2.82 times that of FourierFT, raising concerns about the scalability and efficiency of LoCA for large-scale models.\n\n● The theoretical framework assumes asymptotic normality of weight updates, enabling the use of the Central Limit Theorem and empirical spectral density for analyzing expressivity. However, this assumption relies on i.i.d. updates, which may not hold in the context of gradient-driven, correlated weight adjustments inherent in LoRA and LoCA. Given the limited and targeted nature of LoCA’s updates, the cumulative adjustments may lack the “sum of many independent adjustments” necessary for CLT to apply reliably. This assumption weakens the robustness of the theoretical claims, as the actual distribution of weight updates is likely far from normal in practical implementations. \n\n● LoCA’s dynamic selection of high-magnitude frequency components across epochs may introduce instability during convergence, as the selection of significant frequencies may shift due to changing gradients. 
This could impact the model’s ability to achieve stable and consistent updates over time. Furthermore, by focusing solely on high-magnitude frequencies, LoCA risks omitting task-relevant information in lower-magnitude components, potentially limiting its adaptability in tasks requiring finer-grained details.\n\n● The method also relies on finite-difference approximation to estimate location gradients, which introduces additional computational and memory costs. This overhead may significantly increase CUDA memory requirements, particularly in high-dimensional models or when frequent updates are necessary." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024loca,\ntitle={Lo{CA}: Location-Aware Cosine Adaptation for Parameter-Efficient Fine-Tuning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4NRjdISWby},\nnote={under review}\n}" }, "abstract": { "value": "Low-rank adaptation (LoRA) has become a prevalent method for adapting pre-trained large language models to downstream tasks. However, the simple low-rank decomposition form may constrain the optimization flexibility. To address this limitation, we introduce Location-aware Cosine Adaptation (LoCA), a novel frequency-domain parameter-efficient fine-tuning method based on inverse Discrete Cosine Transform (iDCT) with selective locations of learnable components. We begin with a comprehensive theoretical comparison between frequency-domain and low-rank decompositions for fine-tuning pre-trained large models. Our analysis reveals that frequency-domain approximation with carefully selected frequency components can surpass the expressivity of traditional low-rank-based methods. Furthermore, we demonstrate that iDCT offers a more efficient implementation compared to inverse Discrete Fourier Transform (iDFT), allowing for better selection and tuning of frequency components while maintaining equivalent expressivity to the optimal iDFT-based adaptation. By employing finite-difference approximation to estimate gradients for discrete locations of learnable coefficients on the DCT spectrum, LoCA dynamically selects the most informative frequency components during training. Experiments on diverse language and vision fine-tuning tasks demonstrate that LoCA offers enhanced parameter efficiency while maintains computational feasibility comparable to low-rank-based methods." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Parameter-efficient fine-tuning", "discrete cosine transform", "transfer learning", "adaptation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/4c737e648836f6f708508659aa0fae987f7d241a.pdf" }, "presentation": null, "primary_area": { "value": "transfer learning, meta learning, and lifelong learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/4f5c88d595653e74bb8b81085101719dd5ade5aa.zip" }, "title": { "value": "LoCA: Location-Aware Cosine Adaptation for Parameter-Efficient Fine-Tuning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4NTrco82W0
Beyond Squared Error: Exploring Loss Design for Enhanced Training of Generative Flow Networks
main
Active
GFlowNet;Generative Models;f-Divergence;Loss Function
generative models
5;6;6
4;3;3
2;4;3
2;3;3
3;4;3
5.666667
3.333333
3
2.666667
3.333333
-1
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Could the authors clarify if they’ve noticed stability shifts in higher-dimensional or complex tasks and if adjustments might bolster robustness? Additionally, what drives the choice of a limited set of losses—are there theoretical or practical reasons for omitting other f-divergences, like Hellinger? Lastly, insights into each loss function’s hyperparameter sensitivity and effects on convergence guarantees would further clarify their resilience and adaptability." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper introduces a systematic framework for designing regression losses in GFlowNet training, linking each loss to specific divergence measures for targeted properties. Resulting in three new losses—Shifted-Cosh, Linex(1/2), and Linex(1)—that enhance exploration and exploitation balance." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a novel framework for GFlowNet objective functions, unifying existing training algorithms and clarifying key components. By establishing a connection between objective functions and divergence measures, it offers valuable insights into designing effective training objectives. The authors investigate key regression properties—zero-forcing and zero-avoiding—and propose three new loss functions (Linex(1), Linex(1/2), and Shifted-Cosh) to balance exploration and exploitation. Extensive experiments on benchmarks, including hyper-grid, bit-sequence, and molecule generation, show that these losses outperform the common squared loss in convergence speed, diversity, quality, and robustness." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Broader exploration of other potential divergence-based losses would offer a more comprehensive understanding of the effects of different divergence properties on GFlowNet training. Although the novelty lies in extending GFlowNet loss functions, there are similar attempts in reinforcement learning and generative models. Although the paper derives theoretical properties of zero-forcing and zero-avoiding, it lacks direct theoretical comparison with existing GFlowNet training algorithms." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Areas for improvement and suggestions:\n\n1. Making a literature connection with f-GAN, which also uses f-divergence in GANs, might be insightful to readers.\n\n\n2. Include a discussion connecting with off-policy exploration methods. Are your loss and off-policy search orthogonal? Which means, is your loss function combined with an off-policy method (e.g., local search) better than the TB loss combined with an off-policy method?\n\n\n3. It's good to see the categorization of prior GFlowNet works. Can you include this recent work [1] that uses genetic search as an off-policy method for training GFlowNets and provide some discussion?\n\n\n\n[1] Hyeonah Kim et al., \"Genetic-guided GFlowNets for Sample Efficient Molecular Optimization,\" NeurIPS 2024." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper is well-written and easy to follow.\n\n\n2. The theories are insightful." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a novel theoretical finding for GFlowNets regarding their objective function. Using f-divergence theories, they connect existing objectives of GFlowNets and show that they are special cases of the squared loss. They design a new loss structure that combines both properties together: (1) zero forcing (as considered in existing losses) and (2) zero avoiding, which compensates for exploration. Their new loss function seems to have empirical benefits." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The empirical results seem to be weak; they are only varied in synthetic tasks" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "See Weaknesses." 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "Contributions:\n- generalizing the objective function of gflownet\n- derive impact of loss function on the gradient\n- define zero-forcing (encourage epxloitation) and zero-avoiding (encourage exploration) as two key properties induced by certain loss functions\n- They create 3 new losses (alonside the existing quadratic loss) to tackle all 4 possible combinations (with/without zero-avoiding, with/without zero-forcing)\n- Linex(1) corresponds to the KL divergence\n- experiments on 3 datasets\n\t- Non-zero-forcing losses (Linex(1) and Linex(0.5)) converge faster on hyper-grid\n\t- Linex(1) obtains all the modes almost always the fastest, but spearman corr between train and test is highest for shifted-cos on bit-sequence\n\t- Linex(1) tends increase diversity while quadratic and shifted-cos give higher quality (high average rewards) on molecule generation\n\nPaper is well written." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose to modify the loss function of Gflownet (which has been completely overlooked by prior work). They show that dinstinct losses lead to different divergences. They propose three new loss functions, evaluate them extensively on diverse benchmarks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- it would be nice to have a few more choices of losses, taking inspiration let say from f-divergences (Reverse-KL, JSD, etc.)\n- there may be more than just zero-forcing and zero-avoiding to the key properties of loss functions hence why studying more losses would be helpful\n- it would be nice to let say consider hybrid methods with some kind of annealing. For example, why not use Linex(1) for its fast convergence to a large number of nodes, before then transitioning to shifted-cos for higher rewards around those now-discovered modes.\n\nSo to me, the paper is great, but its kind of stopping too quickly, it feels like its only just tapping the surface. These kinds of ideas would be easy to test out and add to the papers. \n\nIf the authors add bit more meat to the paper, i.e. extra loss functions and hybrid-annealing (as discussed above), I would likely increase my score. Like I said, its just missing a little bit of filling to make it a great paper." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024beyond,\ntitle={Beyond Squared Error: Exploring Loss Design for Enhanced Training of Generative Flow Networks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4NTrco82W0},\nnote={under review}\n}" }, "abstract": { "value": "Generative Flow Networks (GFlowNets) are a novel class of generative models designed to sample from unnormalized distributions and have found applications in various important tasks, attracting great research interest in their training algorithms. In general, GFlowNets are trained by fitting the forward flow to the backward flow on sampled training objects. 
Prior work focused on the choice of training objects, parameterizations, sampling and resampling strategies, and backward policies, aiming to enhance credit assignment, exploration, or exploitation of the training process. However, the choice of regression loss, which can highly influence the exploration and exploitation behavior of the under-training policy, has been overlooked. Due to the lack of theoretical understanding for choosing an appropriate regression loss, most existing algorithms train the flow network by minimizing the squared error of the forward and backward flows in log-space, i.e., using the quadratic regression loss. In this work, we rigorously prove that distinct regression losses correspond to specific divergence measures, enabling us to design and analyze regression losses according to the desired properties of the corresponding divergence measures. Specifically, we examine two key properties: zero-forcing and zero-avoiding, where the former promotes exploitation and higher rewards, and the latter encourages exploration and enhances diversity. Based on our theoretical framework, we propose three novel regression losses, namely, Shifted-Cosh, Linex(1/2), and Linex(1). We evaluate them across three benchmarks: hyper-grid, bit-sequence generation, and molecule generation. Our proposed losses are compatible with most existing training algorithms, and significantly improve the performances of the algorithms concerning convergence speed, sample diversity, and robustness." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "GFlowNet", "Generative Models", "f-Divergence", "Loss Function" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/19af6e2a44f9fa1923f4536bbf114c1797ae8ae0.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "Beyond Squared Error: Exploring Loss Design for Enhanced Training of Generative Flow Networks" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4NWtrQciRH
Evidential Learning-based Certainty Estimation for Robust Dense Feature Matching
main
Active
Evidential Deep Learning;Dense Feature Matching;Pose Estimation
applications to computer vision, audio, language, and other modalities
3;5;6;6
4;3;4;3
3;3;3;3
2;3;2;3
2;2;3;4
5
3.5
3
2.5
2.75
-0.408248
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Following are the questions for which I would highly appreciate an answer, these questions have not impacted my current recommendation for this paper, however, the response might have a significant impact on my final recommendations.\n\nQ1- **Unclear evaluation details for adversarial attacks used.** \nThe epsilon values used for attack are starting from 0.1, 0.2 going up to 1., here the attacks l-infinity norm bounded? If yes, then what is the valid image space? Is it [0, 1] or is it [0, 255], meaning when epsilon = 1, does this mean that the epsilon is actually 1/255 (meaning that the valid image space is [0, 255]), or is the value of epsilon actually 1, meaning the entire image is nothing but adversarial noise? In this case, the image would also look semantically different to the human eye meaning that it will no longer be a valid adversarial attack.\nAnd if the epsilon value is in fact 1/255, then the drop in performance is too significant for a very small epsilon value indicating the method is not truly robust to adversarial attacks. Could you also please comment on this?\n\nQ2- **The idea of using Evidential Learning for Pixel-Matching is not entirely novel.** \nWhile the exact downstream task in [3] is different from the one explored by this proposed work, the core ideas for both seem unusually very similar, the key difference being the distributions used, while [3] used a Normal Inverse-Gamma (NIG) distribution, this work uses a Dirichlet distribution. Would you please further highlight the key differences between the two other than some task-related implementation details?\n\n\n**References**\n\n[3] Chen Wang, Xiang Wang, Jiawei Zhang, Liang Zhang, Xiao Bai, Xin Ning, Jun Zhou, Edwin Hancock,\nUncertainty estimation for stereo matching based on evidential deep learning,\nPattern Recognition, Volume 124, 2022,108498, ISSN 0031-3203, https://doi.org/10.1016/j.patcog.2021.108498. (https://www.sciencedirect.com/science/article/pii/S0031320321006749)" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "If the idea of using evidential learning for feature matching is truly novel then that makes the work quite interesting and significant.\nApart from a couple of small typos, the paper is very well written. \nThe structure of the paper and the intended story are easy to follow.\nThe abstract of the paper is well written and to the point." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents an interesting idea of evidential learning for certainty estimation for the dense pixel-level feature matching task. \nThe proposed method is supposedly more OOD and adversarial robust than the current SotA RoMa. 
\nIt is tested against 2D Common Corruptions variants of 2 commonly used datasets for this task i.e. MegaDepth-1500 and ScanNet-1500.\nIt is also tested against outdated adversarial attacks such as FGSM and PGD." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "W1- **A lot of implementation details are missing from the paper.**\nSimply mentioning that it is built on top of RoMa is insufficient information.\nIt is understandable to do so for the main paper to save space; however, the supplementary material should be used to provide such information, for example, the exact architecture, the training procedure, details about the datasets, HPC resources used, and other details important for reproducibility. \n\nW2- **Needs a stronger argument for why OOD and Adversarial Robustness is important.**\nThe argument made in the introduction to explain why OOD and Adversarial robustness are important for this task can be made significantly stronger. Unfortunately, a case has not been made for why this is interesting and important for the community. \n\nW3- **Out-dated evaluations for robustness.**\nIf the argument for OOD and Adversarial robustness is readiness for the real world, then the evaluations used do not hold up to the argument. Since the 3D Common Corruptions [1] are more real-world common corruptions than the 2D Common Corruptions used in the paper. Additionally, FGSM and PGD attacks were used for evaluating adversarial robustness, however [2] showed in their work that these attacks, originally proposed image classification are inadequate for pixel-wise prediction tasks, such as the one used in this proposed work. This is because FGSM and PGD optimize the attack by increasing the aggregate loss and not the per-pixel loss, this can cause the attack to be highly localized making a non-robust method appear very robust as the mean performance would still be quite well over the rest of the image space. Thus, specialized pixel-wise attacks such as CosPGD are essential for truly evaluating the adversarial robustness of pixel-wise prediction tasks. \n\nW4- **Using 2D Common Corruptions on other known datasets is not always a novel contribution.**\nIt is unclear if the contribution of the 2 supposed OOD Robustness evaluation datasets MegaDepth-1500-C and ScanNet-1500-C is merely using 2D Common Corruptions proposed for ImageNet-1k and CIFAR datasets but changing their resolutions and applying them to the respective iid datasets or if there is more to the story, for example, some unforeseen complications that needed to be handled? If not, then simply applying these corruptions to other datasets is not exactly a novel contribution, it is still an interesting study just not a \"new contribution\" as claimed in the bullet points in the introduction of the paper.\n\nW5- **Almost Redundant Presentation of Results.**\nIncluding both Table 1 and Figure 3 is redundant. I understand that Table 1 contains the mean values over the 5 severity levels while Figure 3 shows the values at each severity, however by using straight dashed lines of respective colors, with y = mean value for all x values the need for Table 1 is eliminated.\n\n\n\n\n**References**\n\n[1] Kar, Oğuzhan Fatih, et al. \"3d common corruptions and data augmentation.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2022.\n\n[2] Agnihotri, Shashank, Steffen Jung, and Margret Keuper. 
\"CosPGD: an efficient white-box adversarial attack for pixel-wise prediction tasks.\" Forty-first International Conference on Machine Learning. 2024." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "Please refer to the weaknesses section." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper is well-motivated, and the main idea is clearly explained.\n\n2. Experiments are conducted across a wide range of benchmarks with various types of corruptions and adversarial attacks. The proposed method outperforms in most cases.\n\n3. The paper includes visualizations to analyze why the proposed method performs better than comparison method, particularly on corrupted data across different datasets." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper applies evidential deep learning to feature matching tasks, introducing an evidential learning framework for certainty estimation in dense feature matching problems. The proposed method enhances robustness in dense matching against corruptions and adversarial attacks, with extensive experiments conducted and visualization presented to demonstrate its performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Several questions need to be addressed:\n\n1. This work employs a two-dimensional evidential deep learning (EDL) framework to certainty estimation in both coarse-scale and fine-scale losses. What would happen if EDL were applied to only one of these loss scales? Conducting an ablation study could provide insights into the effectiveness of EDL at each scale. It would be great if the authors could report performance results by applying EDL exclusively to coarse-scale or fine-scale losses, compared to using it on both losses. \n\n2. Experiments are conducted on two datasets, MegaDepth-1500 and ScanNet-1500. There are other datasets mentioned in RoMa paper such as the street-view IMC 2022 and the challenging WxBS Benchmark. Evaluating the proposed method on those different datasets could further demonstrate its generalizability across diverse scenarios. \n\n3. The proposed framework incorporates evidential deep learning into the training process. Could you provide details on how the proposed framework affects computational time, specifically in terms of training and inference times compared to the baseline RoMa method?" 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. How much does the threshold used for balanced sampling matter for the robustness under image degradations? Both in the original RoMa and the new model.\n2. My reading is that computationally, the new certainty estimation is more or less as heavy as the old in terms of inference and training speed. Is this correct?\n3. Is the increased performance due to only better certainties? For example, is the performance the same if certainty scores from the new model are combined with matches from the original RoMa?\n4. Is there a way to make use of the built in uncertainty in the Dirichlet distribution as described in Weakness 3?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper proposes a simple tweak to the RoMa-method, which gives good results experimentally.\n2. Within the framework of the RoMa matcher, certainties are updated iteratively in each warp refinement step by a \"logit-offset\" in the original model. It seems more intuitive to let each refinement step produce positive evidence values for the two classes \"correct\" and \"incorrect\" that are summed over the steps, as is done in this paper. Perhaps, the authors could expand on this in the paper.\n3. In general, outputting good certainties is an underexplored part of deep learning for 3D vision. The application of evidence based learning to dense matchers is novel." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose modelling certainty of correspondences in dense matchers using an evidential deep learning approach. Instead of just estimating a certainty between 0 and 1, the model outputs the parameters of a Dirichlet distribution over probabilities of the two classes \"the predicted correspondence is reliable\" and \"the predicted correspondence is unreliable\". The certainty output is then the expected probability of the first class according to this Dirichlet distribution. The authors show experimentally by retraining the dense matcher RoMa that their approach leads to improved certainty scores, in particular leading to increased robustness to image corruptions." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The experiments are limited in that only a single model is tested. Hence, it is difficult to say if the improvements generalize to other models.\n2. The explanation of evidential deep learning was difficult to understand, and I had to refer back to the original paper by Sensoy et al. I think this section could be improved.\n3. Evidential deep learning has a built-in uncertainty measure. 
In the context of the present paper, we get a Dirichlet distribution over the classes \"the predicted correspondence is reliable\" and \"the predicted correspondence is unreliable\", and the associated uncertainty describes how spread out this Dirichlet distribution is. This uncertainty is however not used in the present approach. This makes it a bit difficult to interpret the method. We get a Dirichlet distribution but only use its expected value over the first class. This expected value should signify correspondence reliability, but there is also an uncertainty of this prediction inherent in the Dirichlet distribution, which is not used. Since RoMa uses regression-by-classification in the coarse matching step, a more natural approach may be to reformulate the loss for that classification over $N\\times N$ image patches as evidential and use the uncertainty of the predicted Dirichlet distribution as an uncertainty score." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "If EDL is mainly used for certainty estimation, what are the differences or relationships of the proposed method compared to outlier filtering post-processing in feature matching (RANSAC)?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The incorporation of EDL to dense feature matching is interesting, and has not been investigated before.\n2. The proposed method enjoys good performance in corrupted data." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes to unify evidential deep learning (EDL) and dense feature matching, achieving more robust matching results, especially for corrupted image pairs. The authors propose MegaDepth-1500-C and ScanNet-1500-C benchmarks to evaluate the robustness of the proposed method under common image corruptions. The proposed method enjoys superior results in both clean and corrupted data." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Although the point of EDL is interesting, the usage of EDL for certainty estimation in dense feature matching is still questionable. From the introduction in Section 2.3, I think EDL's main advance is to detect out-of-distribution samples or mining pseudo-unknown objects. However, the certainty estimation in feature matching is just a binary classification task (matched or not matched). Why is EDL still effective? The authors did not provide a more insightful discussion about this key question.\n\n2. The overall contribution is limited. Because of lacking enough in-depth discussion about EDL and certainty estimation in feature matching, makes this work appear more as a mere combination of these two approaches rather than a convincing exploration.\n\n3. 
The introduction of EDL in Section 3.2 is insufficient, missing the necessary background/preliminaries from related works.\n\n4. Experiments are not sufficient, missing discussion of visual localization (InLoc and AachenDay-Night) and homography estimation (HPatches). The proposed method achieves significant improvements on corrupted data, while the improvements on clean data are limited. As a general certainty estimation method, the use of EDL should consistently improve the matching accuracy in all scenarios." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "An evidential deep learning framework to improve the robustness of dense feature matchers" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024evidential,\ntitle={Evidential Learning-based Certainty Estimation for Robust Dense Feature Matching},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4NWtrQciRH},\nnote={under review}\n}" }, "abstract": { "value": "Dense feature matching methods aim to estimate a dense correspondence field between images. Inaccurate correspondence can occur due to the presence of unmatchable regions, highlighting the need for certainty measurement. This is typically addressed by training a binary classifier to decide whether each predicted correspondence is reliable. However, deep neural network-based classifiers can be vulnerable to image corruptions or perturbations, making it difficult to obtain reliable matching pairs in corrupted scenarios. In this work, we propose an evidential deep learning framework to enhance the robustness of dense matching against corruptions. We modify the certainty prediction branch in dense matching models to generate appropriate belief masses and compute the certainty score by taking expectation over the resulting Dirichlet distribution. We evaluate our method on a wide range of benchmarks and show that our method leads to improved robustness against common corruptions and adversarial attacks, achieving up to 10.1% improvement under severe corruptions." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Evidential Deep Learning", "Dense Feature Matching", "Pose Estimation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/790294d561a63d2bc2a422f266be24e2e4707c7e.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs.
To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Evidential Learning-based Certainty Estimation for Robust Dense Feature Matching" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
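For orientation, the certainty computation that the abstract and the third review of this record describe (belief masses inducing a two-class Dirichlet, with the certainty score taken as the expected probability of the "reliable" class) follows the standard evidential deep learning recipe of Sensoy et al. A minimal sketch is given below; the use of softplus as the evidence function and the tensor layout are assumptions, and the paper attaches this to RoMa's certainty branch rather than using a standalone function like the one shown here.

```python
import torch
import torch.nn.functional as F

def edl_certainty(logits):
    """Two-class evidential certainty for a dense matcher's certainty branch.

    logits: (..., 2) raw outputs, channel 0 = 'reliable', channel 1 = 'unreliable'.
    Returns the expected reliability under the induced Dirichlet distribution,
    plus the vacuity-style uncertainty K / S that one review notes is left unused.
    """
    evidence = F.softplus(logits)               # non-negative evidence e_k
    alpha = evidence + 1.0                      # Dirichlet concentrations alpha_k = e_k + 1
    strength = alpha.sum(dim=-1, keepdim=True)  # S = sum_k alpha_k
    certainty = alpha[..., :1] / strength       # E[p_reliable] = alpha_reliable / S
    vacuity = 2.0 / strength                    # K / S with K = 2 classes
    return certainty, vacuity
```

Thresholding or sampling matches proportionally to `certainty` would then play the same role as the original certainty score in a matcher such as RoMa.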
4NgxI6Z74n
Memory-Efficient Self-Supervised Contrastive Learning with a Supervised Loss
main
Active
contrastive learning;self-supervised learning;representation learning;machine learning theory
unsupervised, self-supervised, semi-supervised, and supervised representation learning
3;3;5;5
4;3;4;4
2;2;2;3
2;1;2;2
3;1;2;1
4
3.75
2.25
1.75
1.75
0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "None" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. This paper has theoretical claims that connects DIET to CL.\n2. This paper has some empirical evidence that the proposed S-DIET can match the performance on benchmarks like CIFAR and ImageNet-100." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies DIET, which is a method that has rather few adoptions. This paper claims that DIET uses fewer memory than common contrastive learning approaches, and proved some theoretical results showing that DIET and spectral contrastive learning share the same solutions. Moreover, this paper proposes a new alternative S-DIET to further improve its performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The method DIET studied in this paper has a fatal limitation, which is that the labels are essentially the sample index. As the dataset size increases, the classification head will need to increase linearly as well, which makes it impractical to use in large scale dataset training. Even though the proposed S-DIET does not require the classification head to be always loaded into the memory, it is still unnecessary to store such a large head, especially when one is training on millions of data." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "Please refer to the weakness." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper provides comprehensive and rigorous theoretical proofs.\n2. Addressing the high memory demand of DIET is a well-motivated objective with strong practical significance.\n3. The experimental results presented in Table 5 demonstrate promising improvements.\n4. The paper conducts a detailed and insightful ablation study." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents S-DIET, a memory-efficient modification of DIET for self-supervised contrastive learning. 
It proves that DIET with a linear encoder and MSE loss is theoretically equivalent to spectral contrastive loss, and proposes feature normalization and projection head use to enhance performance. S-DIET significantly reduces DIET's memory requirements and achieves state-of-the-art performance without extensive hyperparameter tuning." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper aims to address three issues: why DIET can perform comparably to CL, DIET's failure to learn all features, and its high memory demand. However, these issues are addressed in a fragmented manner without clear logical connections between them, making it difficult for readers to grasp the paper's central thesis.\n2. The paper does not provide code or pseudocode, which hinders understanding of the proposed method and limits the ability to verify its effectiveness.\n3. It is well-known that MSE is not typically used as a loss function for classification tasks. In the original DIET paper, each sample is treated as a separate class in a classification problem using cross-entropy loss. Why, then, is MSE employed as the loss function in Section 4? Does Theorem 4.5 hold if cross-entropy loss is used instead?\n4. Due to the use of W1, it is unclear how Theorem 4.3 is related to the proposed method.\n5. The theoretical analysis in the paper relies entirely on linear assumptions, while the proposed method (Equation 5) is based on empirical assumptions. These assumptions raise concerns about the rigor of this work." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1) The review in the manuscript is clearly incomplete. Some important literature are missing. The author may want to have a more comprehensive literature review. \n\n2) The analysis in the Section 4 is not surprising due to the linear setting. Can the analysis be extended to the nonlinear case? The author may read the recent development under the nonlinear setting in Wang 2023. \n\n3)Assuming $W_H$ is an isometry seems a very strong assumption. Do the authors put a constraint for $W_H$ in the loss function to enforce such isometry? Otherwise, the authors may consider removing this assumption.\n\n4)The form of training example introduced in Section 5 is unrealistic. When does this assumption hold (at least approximately) in piratical example? Why are there two features, one is low noise and the other is high noise?\n\n5)Can we generalize the results in Theorem 5.1 to a more realistic setting?\n\n6) The idea in memory-efficient DIET is straightforward." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The connection between DIET and spectral contrastive loss is interesting." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The manuscript studied the property of a supervised representation learning, called DIET, and then propose an improved version of this method. Specifically, the authors show the equivalence between DIET and spectral contrastive loss proposed by HaoChen \\& Ma under a linear case. In addition, the improvement is motivated by the insight derived from the model introduced in the setting of Section 5.1. Although it looks interesting when all the strong assumptions are true, it is unclear if the results presented in the manuscript can provide guidance in the practical settings." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The analysis is built on some unrealistic assumptions. It is unclear whether the results and development in the manuscript can hold in practice. It could be more meaningful to make assumptions more carefully." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please see **Weaknesses**." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- This paper explore the limitation of DIET, an important method of CL, and obtain several conclusions: (i) for linear encoders, DIET with MSE loss is equivalent to spectral contrastive loss; (ii) DIET tends to learn features with less noise but may not capture all relevant aspects of the training data; (iii) feature normalization can help mitigate this issue, while incorporating a projection head can further enhance performance. \n\n- This work further introduces SCALED-DIET (S-DIET) to improve the model's linear probe accuracy, i.e., use batch cross entropy and the multistep update formula for AdamW.\n\n- Some experiments demonstrate the effectiveness of the proposed S-DIET." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This study provides a theoretical analysis of DIET, a recently proposed supervised approach for self-supervised learning. DIET, as a menthod of CL, labels each example by its datum index and employs a supervised loss for training. This work obtains several conclusions, including (i) for linear encoders, DIET with MSE loss is equivalent to spectral contrastive loss; (ii) DIET tends to learn features with less noise but may not capture all relevant aspects of the training data; (iii) feature normalization can help mitigate this issue, while incorporating a projection head can further enhance performance. This work further introduces SCALED-DIET (S-DIET) to improve the model's linear probe accuracy." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The motivation behind this paper is unclear. 
According to P2 in the Introduction, DIET's advantage lies in its ability to mitigate CL's reliance on large datasets, while requiring a smaller parameter dimension to balance with sample size. The claim that smaller encoder dimensions are a key drawback for handling large data is not well justified. Furthermore, the authors state, \"not clear whether DIET can capture the pairwise similarities between views...SSL,\" yet DIET, as a CL algorithm utilizing supervised loss, does not explicitly depend on pairwise similarities; this is merely an implementation choice rather than a fundamental mechanism. I fail to see how this motivation strongly connects DIET with contrastive loss; is pairwise similarity closely related to memory? While exploring CL from an efficiency perspective would be valuable, a thorough reading of the paper reveals a lack of such information. Perhaps I overlooked some details, and I hope the authors can clarify their insights.\n\n- The key conclusions in this work need further explanation in relation to the core idea, i.e., MEMORY-EFFICIENT. For instance, the relationships between memory and features, encoder parameter dimensions, and projection heads are not adequately described, leading to fragmented conclusions.\n\n- The choice to study DIET is justified by its independence from large training data, but there are other CL algorithms that also do not rely on large datasets, such as few-shot SSL. Additionally, DIET requires labeled information. Can the analyses in this work be applied to these other methods? If so, what differentiates this work? A broader exploration of algorithms and their mechanisms might enhance the reliability of this study.\n\n- Code that reflects the idea of the algorithm implementation is encouraged, since it is currently only described in 6 lines. At the same time, the introduction of these modules will increase the computational overhead, and related experiments are also necessary; after all, the focus is on memory.\n\n- (Minor) The paper's template appears to differ from the one provided on the official website, such as in the line numbering. Please consider making further corrections.\n\n**I would be happy to reconsider my score if these concerns can be addressed.**" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "The margin difference is not noticeable without a ruler (all the style differences of this paper are due to the usage of a legacy template), and the paper will fit into 10 pages with the correct template. After more cases emerged and based on further discussion, we decided to lean on the lenient side. Please proceed with reviewing this paper." }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": { "value": "We approve the reversion of desk-rejected submission."
}, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": { "value": "margin violation" }, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": { "value": "Submission Desk Rejected by Program Chairs" }, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024memoryefficient,\ntitle={Memory-Efficient Self-Supervised Contrastive Learning with a Supervised Loss},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4NgxI6Z74n},\nnote={under review}\n}" }, "abstract": { "value": "Contrastive Learning (CL) is among the most popular methods for self-supervised representation learning. However, CL requires a large memory and sample size and careful hyperparameter tuning.\nThese factors make it difficult to\nlearn high-quality representations with limited amount of memory. In this work, we theoretically analyze a recently proposed \\textit{supervised} approach, DIET, for self-supervised representation learning. DIET labels every example by its datum index and trains on the labeled data with a supervised loss. DIET does not require a large sample size \nor hyperparameter tuning. However, it falls short when using smaller encoders and is memory intensive due to its massive classifier head.\nGiven its remarkable simplicity, it is not obvious whether DIET can match the performance of CL methods, which explicitly model pairwise interactions between augmented examples. We prove that, perhaps surprisingly, for a linear encoder DIET with MSE loss is equivalent to spectral contrastive loss. Then, we prove that DIET is prone to learning less-noisy features and may not learn all features from the training data. We show feature normalization can provably address this shortcoming and use of a projection head can further boost the performance. Finally, we address the scalability issue of DIET by reducing its memory footprint.\nThe modified approach, namely S-DIET, substantially improves on the linear probe accuracy of DIET across a variety of datasets and models and \noutperforms other SSL methods,\nall with limited memory and without extensive hyperparameter tuning. This makes S-DIET a promising alternative for simple, effective, and memory-efficient representation learning." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "contrastive learning", "self-supervised learning", "representation learning", "machine learning theory" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/f0ef8fc90f6b04c9143f1e0c456623cbfdc166f6.pdf" }, "presentation": null, "primary_area": { "value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Memory-Efficient Self-Supervised Contrastive Learning with a Supervised Loss" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4NsYCAxubi
fPLSA: Learning Semantic Structures in Document Collections Using Foundation Models
main
Active
Natural Language Processing;Large Language Models;Document Analysis;Latent Semantic Analysis
foundation or frontier models, including LLMs
3;3;3;5
4;3;4;2
2;2;2;3
2;2;2;3
1;2;2;3
3.5
3.25
2.25
2.25
2
-0.870388
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. What prompt templates are used in various experiments?\n2. How are the textual descriptions $\\theta_t$ initialized in the algorithm? How can the initial tag descriptions meaningfully be assigned to the segments?\n3. Sec 4.1 Evaluation Datasets: How do you convert the input query and the output answer of each example into segments? Do you learn tags only for segments of answers, but not queries/prompts/questions?\n4. Sect 4.1: This is titled Evaluation Datasets but indeed describes data for clustering and tagging.\n5. L195: How do you sample alternative segments?\n6. L188: What do you mean by the test documents? The datasets are query-answer examples.\n7. In the Hits@K evaluation:\n(a) Do you first generate the tag sequence based on the input of a test example or randomly? \n(b) Does a model predict an answer based on the tag sequence in one prompting call?\n(c) How do you evaluate if a sampled solution is correct or not?\n(d) Why do you say the proposed algorithm improves diversity in outputs, which is not evaluated? In fact, diversity is neither necessary nor sufficient for a model to perform a reasoning task well." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- This work proposes to discover “tags” for text segments in an unsupervised fashion and a novel algorithm that is inspired by the probabilistic latent semantic analysis (PLSA).\n- The algorithm leverages the ability of an LLM to analyze textual materials and is able to find detailed and meaningful tags, as shown in the qualitative results of the paper.\n- The paper show favorable empirical results compared to multiple baselines: traditional latent Dirichlet allocation, it variant + LLM, prompting, and chain-of-thought prompting." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the problem of discovering text segments that share common characteristics and assigning them the same tag description. It proposes an LLM-based method that iteratively assigns tags to document segments and improves each tag description based on the segment cluster. The authors aim to show that these tags are helpful for a reconstruction task and in improving “Hits@K” accuracy in evaluation sets created from WritingPrompts, MATH, and the BBH benchmark." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The writing in this paper is often too generic and high-level. For example, when a reader reads the motivation at L011-L014 “Humans have the ability to learn new tasks by inferring high-level concepts from existing solutions, then manipulating these concepts in lieu of the raw data. 
Can we automate this process by deriving latent semantic structures in a document collection using foundation models?”, they may wonder: \n - What tasks do you mean?\n - Existing solutions of the “new tasks” or other relevant tasks?\n - What does it mean to manipulate high-level concepts?\n - How do you define “semantic structures” in a document collection? It’s not precise to describe a set of “tags” as a structure.\n2. The novelty of the method is limited and its connection to PLSA and EM is loose. The proposed algorithm is simple: (1) Initialize a certain number of tag descriptions. (2) Prompt an LLM to assign a tag to each document segmentation based on the tag descriptions. (3) Let an LLM generate a new tag description that describes the shared characteristics of the segments in this cluster.\n - The main Eq. (4) is actually not used: $p(d)$, $p(x_k|d)$, $p_\\Theta(t|x_k,d)$, and $p_\\Theta(w_{1\\dots n}|t)$ are not computed.\n - L153: The parameters $\\theta_t$ are textual descriptions instead of floating-point parameters and no training is happening.\n - L157: No probability distribution is involved. An LLM is employed to greedily perform the steps in the algorithm.\n - PLSA is a generative model of the training documents that it is estimated on, and it is not a generative model of new documents. But this paper aims to find tags that apply to unseen examples.\n3. While the convergence criteria matters for an EM algorithm, this paper simply sets the number of iteration to 30. Not enough analyses is perform on the impact of the number of iterations.\n4. In the reconstruction experiments the method based on learned tags solves a multiple choice problem of picking the ground truth $x_k$ from a set of candidate segments. However, baselines such as prompting in Eq. (7) requires a language model to generate the ground truth $x_k$. These seem not comparable.\n5. Although the experiment results are positive compared to the baselines, the setups are synthetic. Would be nice to see the application of this algorithm to achieve competitive results according to standard evaluation metrics of the used datasets, which are common benchmarks.\n6. Many details are missing. See the Questions below." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "please refer to the weaknesses" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "(1) Study a classic task\n\n(2) Propose a new method\n\n(3) conduct some experiments to verify the effectiveness of the proposed method." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces an improved version of probabilistic Latent Semantic Analysis (pLSA), termed fPLSA (Foundation-Model-Based PLSA), which incorporates Large Language Models (LLMs) to refine the modeling of latent tags in documents for topic modeling. It conducts some experiments to verify the effectiveness of fPLSA." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "(1) Insufficient technical contribution: The method utilizes LLMs for tag Description Generation. Specifically, the fPLSA model generates descriptive tags for the document segments by prompting the LLM with segments assigned to a particular tag to produce a cohesive summary that represents what these segments have in common. The parameters of the LLM are kept frozen during the process. This means the LLM is not fine-tuned during fPLSA training but is used in a static manner. While the integration of LLMs into pLSA offers a novel approach to document modeling, the core statistical methodologies underlying pLSA (like the EM algorithm) remain largely unchanged. This may limit the perceived novelty from a methodological standpoint.\n\n(2) Missing necessary experiments: need to involving more baselines that use LLMs for topic modeling, like Pham et al. (2024), Wang et al. (2023) mentioned in the paper. \n\n(3) Poor writing: The transition of some contents are abrupt and hard to readers to understand the points, such as the first and second paragraphs in the introduction.\n\n(4) Missing Implementation Details: all the prompts used in the experiment are not specified such as those for fPLSA and GenOutline (a baseline)\n\n(5) Unclear motivation of the experiment setting: the paper uses GPT-4 for clustering and tagging while using ChatGPT to measure the accuracy. The authors explain it’s because GPT-4 may have data contamination issues on some benchmarks. I think this explanation is lame and need more clarifications while potentially leading to unfair comparison." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- How would fPLSA perform when applied to a single document at test time, especially if that document differs significantly in structure or content from the training set? Can pre-learned tags from similar training data reliably generalize to new documents in such cases, or would fine-tuning on representative samples be necessary to improve performance?\n- Given fPLSA’s structured tagging capabilities, could the authors discuss its applicability to downstream tasks like structured text summarization and content retrieval? Prior research on text summarization with text segmentation has demonstrated that segmenting texts by themes can enhance summarization quality [1]. Could fPLSA’s tags be similarly used to segment and summarize each thematic section, creating a coherent multi-level summary? 
Additionally, might these tags support content retrieval or indexing by allowing documents to be searchable by thematic segments? Including a brief paragraph on such applications could highlight the contribution's versatility.\n- Can the authors provide insights into fPLSA’s computational cost compared to the baselines? For instance, would a less resource-intensive model (like a smaller language model) yield competitive results without the same computational burden?\n- How sensitive is fPLSA to the choice of segment granularity (sentence, paragraph, etc.)? In testing, did certain segmentation approaches yield more cohesive or meaningful tags, and if so, could the authors elaborate?\n- Since pre-trained LLMs may encode biases, did the authors observe any potential bias issues during fPLSA’s tagging process? If so, what mitigation strategies might they recommend for fair and balanced tag generation?\n\n**References**\n1. Semantic Self-Segmentation for Abstractive Summarization of Long Documents in Low-Resource Regimes. AAAI 2022." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- **Innovative Approach:** fPLSA is a well-conceived combination of probabilistic topic modeling and LLM-based embedding, creating a tagging system that captures both low- and high-level semantics. This approach enables a nuanced understanding of document structure that extends beyond traditional methods, addressing complex relationships within text segments.\n- **Diverse Evaluation:** The method is rigorously evaluated across multiple datasets, including narrative, mathematical, and multi-step reasoning tasks, demonstrating consistent performance improvements in text reconstruction and sampling diversity. This diversity in datasets reinforces the robustness and generalizability of the approach.\n- **Potential for Cross-Domain Applications:** fPLSA’s ability to structure and tag text meaningfully is a powerful tool for hierarchical content generation, segmentation, and structured summarization, with substantial applications across various domains, such as education, content generation, information retrieval, and summarization.\n- **Foundation for Future Research in Unsupervised Document Tagging:** fPLSA provides a strong foundation for future work in unsupervised document tagging and text segmentation. Its hierarchical tagging approach encourages further exploration in transfer learning, document summarization, and adaptive segmentation, inspiring new research directions for improved document understanding and organization." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces fPLSA (foundation-model-based Probabilistic Latent Semantic Analysis), a novel approach for identifying latent semantic structures in document collections by combining traditional PLSA with the contextual understanding of large language models (LLMs). fPLSA enhances probabilistic clustering and unsupervised topic modeling by assigning semantic \"tags\" to document segments through an iterative Expectation-Maximization (EM) process, where each tag captures both local meaning and broader document context. 
This structured tagging approach enables fPLSA to better capture complex segment relationships, making it valuable for hierarchical sampling, document analysis, and potentially other downstream tasks such as structured summarization. The paper demonstrates fPLSA’s effectiveness across diverse datasets—narrative (story writing), problem-solving (math), and multi-step reasoning—showing improvements in text reconstruction likelihood and Hits@K accuracy, underscoring its robustness and versatility." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- **Single-Document Applicability:** fPLSA heavily relies on cross-document patterns during training, which is not fully addressed in terms of single-document use cases. At test time, users often only have one document. It would be beneficial to clarify how fPLSA’s pre-trained tags would generalize to individual documents without access to cross-document patterns. For instance, can the model effectively apply pre-learned tags from similar training data to new documents?\n- **Lack of Efficiency Analysis:** Given fPLSA’s reliance on LLMs, a discussion on computational efficiency would be valuable. While LLMs are powerful, they are computationally expensive. Addressing the practical feasibility of deploying fPLSA at scale (or proposing more efficient variations) would make the paper’s findings more actionable.\n- **Potential LLM Biases:** Since fPLSA uses pre-trained LLMs to assign tags, there is a risk of encoding biases from the LLM's training data into the tags. The authors could explore ways to mitigate or assess the impact of these biases, especially for datasets or domains sensitive to fairness and accuracy.\n- **Segmentation Granularity:** The paper does not discuss how sensitive fPLSA is to the choice of segment granularity (e.g., sentence, paragraph) and whether different segmentation approaches yield more cohesive or meaningful tags. Further examination of this could provide clarity on best practices for applying fPLSA across different document types and tasks.\n- **Potential for Downstream Applications:** Although the paper’s results demonstrate fPLSA’s effectiveness in hierarchical sampling, the model's broader potential in downstream tasks is not explored. Given the rich, hierarchical nature of fPLSA tags, they could be valuable for applications like multi-level text summarization, where each tag could represent a theme or section for summarization. Exploring these applications would broaden fPLSA’s impact." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- I'm a bit confused by the relation between w, x, and d. If $w_{1:n}=x_k\\subset d$, how is $p(x_k|d)$ modeled in (4)? Why is it necessary to include both $x_k$ and $d$ as conditional terms?\n\n- Context window: I'm not sure I understand how the segments are selected in this article. Is a segment a fixed-length sequence of tokens? 
Are there any overlaps between different segments? In Line 238, the authors mentioned \"we use a context window size of 2 on WritingPrompts and use unlimited context window\". How should the \"unlimited context window\" be interpreted?\n\n- According to [1], the latent variable ($z$ in [1]) is supposed to be categorical. This article borrowed the same concept from [1] but I'm not sure whether this article follows the original setup. The authors did mention that they \"set the number of tags to 100\", but the example tags in Table 3 showed that the tags are natural language descriptions rather than categorical labels. I wonder how the tags are generated, and whether calling them \"latent\" is still appropriate.\n\n- In (5), $t_k$ is sampled conditioned on $x_k$, which is later used to estimate the probability of reconstructing $x_k$. Is this a typo? Doesn't this lead to data leakage and make the results of (5) unfairly high?\n\n- For BBH, I'm not sure why it is necessary to \"use the step-by-step solutions produced by their automatic Chain-of-Thought\nprompt inference algorithm for clustering and tagging\". Does it mean that a part of the (ground-truth) solutions is utilized as the prompt to the model for problem-solving? I think this is a huge data leakage issue that would greatly undermine the soundness of the evaluation of the proposed method.\n\n- Since tag generation is a recursive process, what would the token consumption be for achieving the presented results? How about the baseline models?\n\n[1] Hofmann, T. \"Probabilistic latent semantic indexing.\" Proceedings of the 22nd annual international ACM SIGIR conference on Research and development in information retrieval. 1999." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The article is generally well-written except for the technical part, which I find somewhat confusing. According to the article, fPLSA's strengths lie in its enhanced semantic understanding, leveraging LLMs for capturing nuanced document structures beyond lexical co-occurrence. This approach yields more accurate text reconstruction and supports hierarchical sampling, producing diverse, high-quality outputs in applications like story generation and problem-solving. Its specific and detailed tagging outperforms generic LLM-based tags, enhancing content generation. Additionally, fPLSA’s unsupervised clustering reduces the need for labeled data, while its demonstrated adaptability across domains and improved Hits@K accuracy make it a versatile, efficient tool for semantic analysis and structured text generation." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces fPLSA, a foundation-model-based extension of Probabilistic Latent Semantic Analysis (PLSA), aimed at discovering semantic structures within document collections through clustering and tagging of text segments. Unlike traditional topic modeling, which often relies on word co-occurrences, fPLSA leverages Large Language Models (LLMs) to understand segment-level semantics in a broader document context. It applies an Expectation-Maximization (EM) algorithm to iteratively refine segment tags, enhancing both text reconstruction accuracy and hierarchical sampling.
Experimental results on datasets for story writing, math, and reasoning show that fPLSA significantly outperforms traditional and LLM-based tagging methods in text reconstruction and solution diversity. This makes it suitable for generating effective problem-solving guides and diverse text outputs across varied tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The usage of math symbols is sometimes confusing. Also not required, it is suggested that the authors follow the [default notation](https://github.com/ICLR/Master-Template/raw/master/iclr2025.zip) for a clearer presentation of the equations.\n\n- The proposed method is not thoroughly explained. For example, the computation of some terms in (4) is missing, as well as its optimization algorithm, e.g., how to calculate $p(x_k|d)$. From my perspective, if one chooses to express the idea using math formulas, then every term should be clearly explained except for the cases where it is extremely obvious, which I think does not apply to (4).\n\n- A figure or algorithm may better explain the proposed method.\n\n- The authors use GPT-4 for clustering and tagging but GPT-3.5 for response generation and did not provide experimental results on other combinations. The performance of the proposed method therefore may not be universally applicable.\n\n- Potential data leakage issues (detailed in Questions).\n\nOverall, I think the approach proposed by this article is rather straightforward and could be easily described with better clarity without introducing any formulae, perhaps except for the motivation part. In addition, it seems that this article may find a broader audience in the pure NLP community rather than a mixed community of different machine learning topics. Therefore I would recommend to submitting this manuscript to ACL ARR venue instead of machine learning conferences." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024fplsa,\ntitle={f{PLSA}: Learning Semantic Structures in Document Collections Using Foundation Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4NsYCAxubi},\nnote={under review}\n}" }, "abstract": { "value": "Humans have the ability to learn new tasks by inferring high-level concepts from existing solution, then manipulating these concepts in lieu of the raw data. Can we automate this process by deriving latent semantic structures in a document collection using foundation models? We introduce fPLSA, a foundation-model-based Probabilistic Latent Semantic Analysis (PLSA) method that iteratively clusters and tags document segments based on document-level contexts. These tags can be used to model the structure of given documents and for hierarchical sampling of new texts. Our experiments on story writing, math, and multi-step reasoning datasets demonstrate that fPLSA tags help reconstruct the original texts better than existing tagging methods. Moreover, when used for hierarchical sampling, fPLSA produces more diverse outputs with a higher likelihood of hitting the correct answer than direct sampling and hierarchical sampling with existing tagging methods." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Natural Language Processing", "Large Language Models", "Document Analysis", "Latent Semantic Analysis" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/45e6e2aca511f56b805e726c2ebb74e92a7084f7.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "fPLSA: Learning Semantic Structures in Document Collections Using Foundation Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4NtrMSkvOy
Enhance the Transferability of Adversarial Attacks through Channel Pruning
main
Active
adversarial attacks transferability;channel pruning;model augmentation
alignment, fairness, safety, privacy, and societal considerations
3;3;3;3
4;5;5;5
1;1;2;2
2;1;2;3
2;1;2;1
3
4.75
1.5
2
1.5
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- What is the size of the test set (out of 2000 images subset) used for evaluation? \n- On line 329, the authors mention \"almost correctly classified by all the evaluated models\" about the chosen subset. What are the exact accuracy numbers for every network?\nRefer to weakness section for more comments." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The attack is transferable across multiple network architectures, within CNNs as well as from CNNs to transformers.\n- Intuitive, novel method for increasing transferability of the black-box attacks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a transferable black-box attack using the concept of model augmentation through channel pruning and knowledge distillation. The authors show that the transferability of existing black-box attacks is limited due to their uneven focus on the channels. The authors also introduce a gradient regularization to enhance the transferability further. The evaluation is done using a subset of 2000 images from ImageNet." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper does not follow the format guidelines closely. Tables 1 and 2 are out of the paper margin significantly. \n- The presentation of the paper needs to be improved. The paper contains many grammatical errors, like a period in the middle of the sentences and incorrectly capitalized words. For example, on line 354, it should be \"Table\" instead of \"table\"; \",,\" on line 355; should be \"pruning\" instead of \"running\" on line 252; the sentence on lines 226-227 is incomplete; there is a blank line in Table 3 and 4 before the first result row. \n- What is the size of the test set (out of 2000 images subset) used for evaluation? \n- On line 329, the authors mention \"almost correctly classified by all the evaluated models\" about the chosen subset. What are the exact accuracy numbers for every network?\n- Limitations like an increase in computing due to model augmentation and the trade-off between the transferability and number of models should be discussed." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. What are the key theoretical differences between pruning techniques like channel, kernel, and block pruning? Do these techniques exhibit different attack performances in both white-box and black-box settings?\n\n2. Could other model augmentation-based attack methods be further improved by incorporating the proposed initial parameter optimization and gradient regularization strategies?\n\n3. Could the authors provide additional details on how adversarial examples are trained on the ensemble of pruned models? In the overall loss function, does knowledge distillation by minimizing L_{ce} conflict with the perturbation training, which involves maximizing L_{ce}? Additionally, how can the proposed five loss terms be effectively trained, and how should their corresponding hyperparameters be adjusted?\n\n4. Could the authors provide additional details on why gradient regularization results in only slight changes in ASR across different layers of the surrogate model? Additionally, why is gradient regularization applied to only one layer rather than multiple layers?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The authors propose using channel pruning to enhance the transferability of adversarial attacks, along with knowledge distillation to recover the classification accuracy of pruned models.\n\n2. During the perturbation training, regularization of important feature maps is introduced to reduce the gradient variance to further enhance the attack performance.\n\n3. Experimental results on various target models demonstrate that the proposed transfer-based method outperforms baseline methods in attack effectiveness." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors propose a model augmentation-based method to improve the transferability of adversarial attacks. To enhance performance in black-box settings, they first introduce the technique of channel pruning to create a self-ensemble surrogate model, which mitigates the overfitting on redundant features or channels. Additionally, the authors integrate both the knowledge distillation and gradient regularization into this ensemble model to further enhance the transferability across various target models. Experiments conducted on multiple CNN and ViT networks using the ImageNet benchmark dataset demonstrate that the proposed model augmentation method achieves relatively high attack performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The motivation for introducing channel pruning is not clearly explained. For example, why was channel pruning selected over other pruning techniques, such as kernel or block pruning? Furthermore, as noted in lines 255-257, the L2-norm distance metric lacks robustness due to its sensitivity to outlier noise. Does this criterion maintain consistent performance across different surrogate models?\n\n2. The submitted manuscript lacks certain details regarding initial parameter optimization. For example, the network structure of the self-supervision (SS) module is not specified. 
Additionally, the authors introduce the L_{ss} loss, as defined in lines 295-296, to maximize the similarity of all positive and negative data pairs, which is inconsistent with standard contrastive learning practices.\n\n3. In Section 4.3, the authors propose regularizing gradient variance within an intermediate feature map but do not specify how this layer is selected within the surrogate model. Additionally, as mentioned in lines 322-323, the overall loss function is designed to minimize each loss term including the L_{ce} loss, which may conflict with the goal of crafting adversarial examples by maximizing L_{ce}. Furthermore, the details regarding the optimization of this overall loss function are not clearly explained.\n\n4. As shown in Tables 3-4, the experiment is conducted on only one surrogate model, which does not provide a comprehensive evaluation. Furthermore, the proposed GRASP method is not directly comparable to other model augmentation methods, as these methods do not incorporate knowledge distillation or gradient regularization.\n\n5. The authors claim that reducing gradient variance can balance the importance of different channels within a layer. However, as shown in Figure 5, the minimal change in attack success rate after regularizing different layers does not provide strong support for this claim." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- Why are some channels redundant? Why can this be explained by over-parameterization? (Line 47.)\n- Why do the pruned surrogate models contain denser information if initial parameter optimization and regularization are applied? (Figure 1)\n- Why do samples overfit on the highly repetitive channels? \n- How are the hyperparameters set in 4.3?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The paper is easy to read through." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a method for a transfer-based black-box attack. The main idea of the method is to augment the surrogate models with differently pruned models and generate the adversarial examples. First, to augment the models, the model is pruned with a predetermined channel pruning rate. To train the augmented models, knowledge distillation is used to match the accuracy of the pruned model, incorporating self-supervision and input augmentation as terms in the loss function. The authors present explanations and experiments to further demonstrate their idea. The experiments are done on an ImageNet-like dataset with targeted and untargeted attacks."
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The weakness of the paper is three-folds.\n- The terminology used in the paper lacks objective justification and includes many subjective and inaccurate expressions.\n-- See questions.\n- Lack of Novelty.\n-- The main methodology comes from the idea of channel-pruning based model augmentation. There are plenty of existing studies that analyze channel-wise pruning in terms of robustness, and numerous studies demonstrate that model augmentation can achieve higher transferability. [1-3] As it stands, there doesn't seem to be much take-away for the reader from the combined ideas in this study.\n- The overall explanation and experiments are insufficient.\n-- Lack of Validation to the proposed method.\nThe experiments are limited to a single dataset, which does not demonstrate generalized results.. \nhe paper primarily explains the performance of the proposed method through intuition and explanation, but lacks supporting evidence. Adding intermediate empirical or theoretical validations for the proposed method would improve its persuasiveness.\nTo enhance the persuasiveness of this study, more ablation studies are needed.\n\n-[1] Bai et al., \"Improving Adversarial Robustness via Channel-wise Activation Suppressing\", ICLR 2021\n-[2] Borkar et al., Defending Against Universal Attacks Through Selective Feature Regeneration, CVPR 2020\n-[3] Tramer et al., \"Ensemble adversarial training: Attacks and defenses\", ICLR 2018" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "-\tWhy did you do not discuss in the related work the loss-based transfer methods such as for example [C] or [D]? \n-\tFor the contrastive learning, how did you choose these three transformations? Why not other ? \n\n[C] Naseer, M., Khan, S., Hayat, M., Khan, F. S., & Porikli, F. (2021). On generating transferable targeted perturbations. In Proceedings of the IEEE/CVF International Conference on Computer Vision (pp. 7708-7717).\n\n[D] Zhao, A., Chu, T., Liu, Y., Li, W., Li, J., & Duan, L. (2023). Minimizing maximum model discrepancy for transferable black-box targeted attacks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (pp. 8153-8162)." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "-\tStudying the transferability of adversarial examples is an important topic.\n-\tThe paper is easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Adversarial examples have recently received much attention in the black-box transfer-based attack scenario due to its more realistic attack setting. 
To enhance the transferability of the generated adversarial example, the paper introduces GRASP, a model augmentation method that uses channel pruning to generate different models. These different pruned models are used to generate an adversarial example that may be less specific to the potential channel redundancy of the source model." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "-\t**Experiments are insufficient and do not sufficiently support claims.** The comparison of the proposed method against other model augmentation methods is only done for one source model (ResNet50). Despite the source network being the same for Table 3 and Table 4, the targeted networks are different, which is surprising. The superiority of the proposed method cannot be established. Moreover, since the proposed method uses three pruned networks to generate the adversarial example, I expect the authors to compare their method against an ensemble method to ensure that the gain obtained is not due to ensembling networks.  It would be interesting to compare against ensemble-based methods such as [A] and [B]. \n\n\n- **Motivation of the method.** In the introduction, the authors say, “On the contrary, some\nof them are vague and seem to only contain weak features about the object in the input image. If\nwe only conduct adversarial attacks based on original CNN models, the adversarial samples tend to “overfit” on those highly repetitive features.” I do not understand why, if a feature is not useful for predicting an object (whereas this feature may be useful for another object), this feature would be exploited by an attack that tries to fool the network. I would expect the attack to disrupt features responsible for predicting the target class and not those that are not useful. To validate their intuitions, can the authors provide an experiment or proof showing that adversarial examples tend to “overfit” those highly repetitive, unuseful features? Moreover, I find it strange that increasing the pruning rate, which is the core of the method, degrades the transferability of the generated adversarial examples. Can the authors clarify this point, please? \n\nTypos:\n- In Table 3 and 4, there is a blank row. \n- Table 1 and 2 are overextended. \n\n[A] Tang, B., Wang, Z., Bin, Y., Dou, Q., Yang, Y., & Shen, H. T. (2024). Ensemble Diversity Facilitates Adversarial Transferability. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 24377-24386).\n\n[B] Chen, B., Yin, J., Chen, S., Chen, B., & Liu, X. (2023). An adaptive model ensemble adversarial attack for boosting adversarial transferability. In Proceedings of the IEEE/CVF International Conference on Computer Vision (pp. 4489-4498)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024enhance,\ntitle={Enhance the Transferability of Adversarial Attacks through Channel Pruning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4NtrMSkvOy},\nnote={under review}\n}" }, "abstract": { "value": "Recent studies have shown that neural networks are vulnerable to adversarial attacks, where attackers generate adversarial samples by imposing tiny noise. The tiny noise can not misguide human perception, though leading the neural networks to generate wrong predictions. 
Transfer-based black-box attacks play a more significant role in recent studies due to their more realistic setting and considerable progress in performance. Previous studies have shown that some different channels of the same layer in convolution neural networks (CNN) contain lots of repetitive information, and we find that existing transferable attacks tend to exploit those redundant features more, which limits their transferability. Hence, we advocate using channel pruning and knowledge distillation to conduct model augmentation. In addition, we introduce a method of regularization on the gradients of intermediate feature maps of augmented models, which further enhances the transferability of our method. Comprehensive experiments demonstrate that imposing our method of model augmentation on existing methods can significantly improve the transferability of adversarial attacks in untargeted or targeted scenarios. Furthermore, our method outperforms state-of-the-art model augmentation techniques without the usage of additional training datasets." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "adversarial attacks transferability", "channel pruning", "model augmentation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/657596a1ed284099bffc1ea50db75407c6cc146f.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/cd3cd0f87f68487a10b286a556c8e7ee2803be02.zip" }, "title": { "value": "Enhance the Transferability of Adversarial Attacks through Channel Pruning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4O0v4s3IzY
On the self-verification limitations of large language models on reasoning and planning tasks
main
Active
Large Language Models;Reasoning;Planning;Self-Critique;Verification
foundation or frontier models, including LLMs
5;5;6;8
4;4;5;4
3;2;2;3
3;2;2;2
2;3;3;3
6
4.25
2.5
2.25
2.75
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. How do the authors anticipate their findings will generalize to other complex reasoning tasks not covered in the study? Can the observed ineffectiveness of self-critique mechanisms be extrapolated to different types of LLMs or reasoning models? \n2. Could the authors elaborate on the choice of domains for the study? Why were these specific domains chosen, and how do they represent the broader spectrum of reasoning tasks applicable to LLMs? \n3. What additional mechanisms or modifications do the authors suggest could potentially improve the self-verification capabilities of LLMs? Is there ongoing work to develop more effective internal critique mechanisms within LLMs? \n4. How do the authors envision the impact of their findings on the future development and deployment of LLMs in safety-critical applications? What precautions or additional measures would they recommend based on their study’s outcomes?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper addresses a novel aspect of LLMs—self-critique and iterative verification—that is underexplored in the existing literature. It challenges the assumption that LLMs can effectively self-critique by demonstrating that external verification offers more reliable improvements. \n2. The experimental setup is clearly described, allowing for reproducibility and understanding of how iterative prompting affects LLM performance in reasoning tasks. The paper methodically outlines its methodology and the rationale behind using specific domains for testing (Sections 4 and 3). \n3. The findings significantly contribute to understanding LLM limitations in self-verification tasks, which is critical for deploying these models in real-world applications where accuracy and reliability are paramount. \n4. The study is well-structured and robustly empirically analyzed, providing a comparative assessment of LLMs with and without external sound verifiers." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper evaluates the self-verification abilities of LLMs in reasoning and planning tasks using iterative prompting and critiquing. It contrasts the performance of LLMs self-verifying their solutions against external sound verifiers across three domains: Game of 24, Graph Coloring, and STRIPS planning. Findings indicate that LLMs underperform in self-verification and that external sound verification significantly improves the accuracy of solutions. The study suggests the ineffectiveness of self-critique mechanisms in LLMs and recommends integrating external verifiers for better performance in reasoning tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
The paper’s focus on only three specific domains might limit the generalizability of the findings. While these domains are relevant, more varied tasks could provide a broader understanding of LLM capabilities across different reasoning types (Section 3). \n2. The analysis of the self-critique mechanism lacks depth regarding why LLMs fail at self-critique. Specific instances of LLM outputs and their failures would enrich the discussion by pinpointing the flaws in LLM reasoning strategies (Section 5.1). \n3. There is no detailed discussion on the computational cost and efficiency of using external verifiers versus self-verification. This information would be crucial for practical implementations where resource constraints are a significant consideration. \n4. The paper does not thoroughly explore the theoretical implications of its findings on the computational complexity theories surrounding LLMs and self-verification. A deeper theoretical analysis could provide insights into the fundamental limitations of LLM architectures (Section 2)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "NA" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See point 2) in weaknesses and, more specifically: are there any arguments that the findings hold not only for the examined LLMs but in general for transformer-based auto-regressive models, or even other models like discrete diffusion models?\n\n\nMinor comment:\nThe use of \\citep and \\citet is not correct. Also there are some small spelling/typesetting issues here and there that should be easily fixable." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1) The paper is really well written and easy to follow. The non-technical nature might have helped here. \n2) I enjoyed reading the (extensive) related work section where problems in prior work are made explicit.\n3) All claims made in the paper are substantiated by appropriate experiments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This is an experimental paper studying the popular technique of self-verification of LLMs for enhancing apparent reasoning capabilities. The stringent experimental protocol shows that self-verification does not really work and that other techniques, like using formal (symbolic) verifiers, are better suited to achieve automated reasoning with LLMs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1) For someone familiar with the field, the findings might be a little bit obvious.
However, given the amount of papers being published using self-verification, I nevertheless believe this to be an important study when it comes to combating the delusion of self-verification.\n\n2) Given that this is an experimental study it might be good to also point out weaknesses in the experimental protocol. Specifically, making explicit all the assumptions that were made and what might change if they were not to hold. Concretely, what gives the authors the confidence that their findings have a high probability of standing the test of time." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please discuss the points mentioned in the weakness part." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The presentation and writing are clear.\n2. The authors made adjustments to the test datasets to ensure their validity. For example, they generated new instances to prevent test set memorization and modified the task solution format to better evaluate the self-verification pipeline.\n3. The systematic analysis of self-verification provides insights into the cases where the verification module can help improve LLM performance." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper discusses the effect of self-verification in LLM reasoning and planning, challenging the assumption that verifying correctness is easier than generating solutions. Through experiments in three domains (Game of 24, Graph Coloring, and STRIPS planning), it finds that self-verification does not improve accuracy and can even reduce performance. In contrast, a \"sound external verifier\" significantly enhances accuracy, with simpler re-prompting methods maintaining most of these benefits." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The experiment design is not entirely persuasive.\nThe authors attempt to challenge the assumption that verifying correctness is easier than generating solutions, suggesting that self-verification does not improve performance. However, task difficulty should be considered. Apart from Blocksworld, the accuracy in other tasks is quite low. According to Table 2, \"LLM Verification Results,\" verification performance varies across tasks, especially in terms of the False Negative Rate (FNR). In Blocksworld, which has the lowest FNR, self-verification actually improves task accuracy from 40% to 55%. This suggests there are cases where verification is easier than generation and where self-verification contributes positively to task accuracy. More tasks should be added for more comprehensive results.\n\n2. 
The authors use a “sound” verifier as a comparison, but it’s unsurprising that a ground-truth verifier significantly improves performance. With ground-truth verification, the model can eliminate incorrect answers and focus on sampling correct ones, or at least provide ground-truth information. Taking the first point into account, a more nuanced conclusion could be that in tasks where verification is easier than generation, self-verification helps; otherwise, it has no benefit or even harms performance. The improvement limit for self-verification is thus bounded by the effectiveness of the “sound” verifier. \n\n3. The exact GPT version should be specified, and more models, particularly advanced ones, should be tested for comprehensive results." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "I don't have more questions, but the authors are encouraged to address my concerns in the weaknesses section." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "This paper has the following strengths.\n\nOriginality:\nThis paper systematically evaluates LLMs' self-verification capability by careful ablation and comparison to symbolic processors. It finds that LLM verification is not very helpful in some planning cases. This is novel and interesting.\n\nQuality:\nThis paper considers problems of memorization and lack of ground truth during evaluation, which are important concerns.\n\nClarity:\nThe experiment setup is quite clear.\n\nSignificance:\nKnowing what LLMs are actually doing when people claim they can do a lot of things is very important." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper mainly evaluates the self-critique and verification capabilities of LLMs in comparison to sound verifiers. It concludes that the self-verification loop conducted by LLMs is not as helpful as previous works claimed. On the contrary, it might hurt LLM performance in some planning domains." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Is it fair to compare LLMs with oracle verifiers? The finding that LLM self-critique can sometimes downgrade their generation performance is interesting, but oracle processors are not always accessible in all tasks (as the authors mentioned in their paper, some tasks such as creative writing do not have a ground truth answer). I'm not surprised that an oracle verifier improves LLM performance, but I wonder whether an LLM can serve as a decent alternative when a sound verifier is absent in general areas.\n\n2. The task domains are constrained while the general conclusion is very strong.
The authors evaluated three main planning tasks and one LLM (GPT-4) with four datasets, with no clear justification as to why the conclusion drawn from these three specific tasks can be generalized. It is unclear why these tasks and datasets can represent general LLMs' lack of self-verification capability in a wide range of tasks.\n\n3. Some other details in this paper are also overclaimed, e.g., on page 2, the authors claim \"...the state-of-the-art GPT-4\" while GPT-4 is not SotA in many benchmarks anymore (in [1], inter alia).\n\n4. There are quite a few typos. Writing needs to be done with more care.\n\n[1] Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., ... & Ganapathy, R. (2024). The llama 3 herd of models. arXiv preprint arXiv:2407.21783." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024on,\ntitle={On the self-verification limitations of large language models on reasoning and planning tasks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4O0v4s3IzY},\nnote={under review}\n}" }, "abstract": { "value": "There has been considerable divergence of opinion on the reasoning abilities of Large Language Models (LLMs).\nWhile the initial optimism that reasoning might emerge automatically with scale has been tempered thanks to a slew of counterexamples--ranging from multiplication to simple planning--there persists a widespread belief that LLMs can self-critique and improve their own solutions in an iterative fashion.\nThis belief seemingly rests on the assumption that verification of correctness should be easier than generation--a rather classical argument from computational complexity--which should be irrelevant to LLMs to the extent that what they are doing is approximate retrieval.\nIn this paper, we set out to systematically investigate the effectiveness of iterative prompting in the context of reasoning and planning.\nWe present a principled empirical study of the performance of GPT-4 in three domains: Game of 24, Graph Coloring, and STRIPS planning.\nWe experiment both with the model critiquing its own answers and with an external correct reasoner verifying proposed solutions.\nIn each case, we analyze whether the content of criticisms actually affects bottom-line performance, and whether we can ablate elements of the augmented system without losing performance. We observe significant performance collapse\nwith self-critique and significant performance gains with sound external verification.\nWe also note that merely re-prompting with a sound verifier maintains most of the benefits of more involved setups." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Large Language Models", "Reasoning", "Planning", "Self-Critique", "Verification" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review."
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/59b96e7d81b724fb9f2c26a190c4ca157110d100.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "On the self-verification limitations of large language models on reasoning and planning tasks" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4OaO3GjP7k
Flat Reward in Policy Parameter Space Implies Robust Reinforcement Learning
main
Active
Reinforcement learning;Flat Minima;Robust Reinforcement learning
reinforcement learning
5;5;6;6
3;3;3;3
3;2;3;3
3;2;3;2
2;2;2;2
5.5
3
2.75
2.5
2
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- Do you have an intuition on why SAM doesn't perform better on Walker2d-v3 for high friction factor?\n- Have you tested SAM+PPO on non-MuJoCo environments to assess robustness in discrete action spaces or varying reward structures?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- This paper provides a formal link between flat reward surfaces and robustness in policy space. Proposition 1 establishes a clear theoretical foundation for the paper's main claim.\n- The authors comprehensively test SAM+PPO across multiple challenging environments and scenarios, including noisy actions and varying transition probabilities, to demonstrate robustness.\n- The authors compare SAM+PPO with RNAC, PPO, and RARL, which shows both performance and computational efficiency, which strengthens their findings.\n- The use of reward surface visualizations and flatness metrics strengthens the paper's argument by providing visual and quantitative evidence for the flatness achieved by SAM+PPO." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper investigates the relationship between flat reward maxima in policy parameter space and the robustness of reinforcement learning (RL) agents. It claims that flatter reward maxima lead to more robust policies, particularly against action perturbations. The paper presents a theoretical proposition linking flat reward to action robustness and supports this claim through empirical experiments in MuJoCo environments (e.g., Hopper-v3, Walker2d-v3, HalfCheetah-v3). The authors demonstrate that an RL algorithm enhanced with Sharpness-Aware Minimization (SAM), called SAM+PPO, consistently outperforms standard PPO and a recent robust RL baseline (RNAC) in various robustness tests, including action noise, transition probability changes, and reward function variations. The paper also provides visualizations and quantitative measurements of reward surfaces, further confirming the link between flatness and robustness." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- While SAM is shown to be effective, the paper lacks a discussion of its potential limitations, such as computational overhead or sensitivity to hyperparameter tuning.\n- The justification for reward noise being added during training for reward function robustness evaluation could be clearer: The paper mentions this difference in methodology but could expand on why this is necessary for a valid evaluation.\n- I don't know if the preliminary experiment is best placed in the introduction, it feels a bit out of place for me.\n- typos 234 \"objeective\", 249 \" funciton\"" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Is there a specific advantage to using PPO with SAM, or could any PG or even AC algorithm be used? It might be that the clipping approximation to the trust region synergizes well with the SAM objective? I think this is an optional extension to the paper." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The authors propose a simple yet intuitive approach for robust RL. I was somewhat surprised that this combination has apparently not been tried in the literature, but a brief literature survey has not brought up any similar algorithms. I actually think the authors are somewhat underselling their contributions here! While SAM has been used to train PPO before, the authors appropriately cite prior work here, previous papers have not drawn any connections to robust RL at all and the authors should feel entitled to proudly claim this connection as their connection! They do not merely provide theoretical backing, as far as I can tell, they make a connection that was wholly absent in cited work.\n\nThe theoretical statements are mostly correct as far as I can tell. See questions below however." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a study on using sharpness-aware regularization to obtain robust reinforcement learning policies. Drawing a theoretical connection between flatness in the reward, action and parameter space to action-robust RL, the authors present both a theoretical justification and experiments to show that the proposed method achieves good robustness properties." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The main problem with the paper as it stands are writing problems and baseline comparisons.\n\nEspecially the beginning of the paper, abstract and introduction, suffer from very frequent grammar mistakes which make the paper much harder to read. 
I strongly encourage the author to revise the paper wrt to the writing.\n\nIn definition 1, I'm unsure if $\\epsilon$ is added to the policy, parameters or action? From the proof it seems this is a parameter perturbation, this should be stated directly. I think adding parentheses in the equation would already make this much clearer, as we have two nested subscripts here.\nIn addition, the state is sampled from the policy, which seems strange?\n\nAs the theoretical statement depends on the Jacobian of the policy network, which is not bounded anywhere, I'm slightly skeptical that the theoretical results are sufficient to practically guarantee robust RL. Does the SAM objective guarantee or incentivize a flat Jacobian?\n\nGiven the surprisingly (?) bad results of RNAC - it barely seems to outperform PPO - I think it would be appropriate to apply SAM+PPO in the same environments as used in the RNAC paper. As far as I can tell, the code is available, so this should be feasible within the rebuttal timeline? If not, I will not hold this against the authors. I think it is important to verify that used examples are not cherry-picked to make the presented algorithm look stronger. This is the higher priority comment in terms of baseline comparisons.\n\nI would encourage the authors to present some additional baselines. I acknowledge that more baselines is a somewhat lazy comment. However, given that there are several different formulations of robust RL, I believe it would be helpful to pick a variety of environments and algorithms presented with different robust formulations for comparison to understand how well the algorithm does in comparison to others. This doesn't have to be many or complex environments, just a larger variety of formalisms. This is a soft concern and not a large barrier to acceptance for me.\nBoth safe-control-gym [1] and Safety Gymnasium [2] provide a variety of tasks and implemented baselines to speed up experimentation.\n\n[1] https://github.com/utiasDSL/safe-control-gym\n[2] https://github.com/PKU-Alignment/safety-gymnasium" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- Is the perturbation domain $\\rho$ in Eq.8 known to the agent? Probably the optimization of the objective in Eq.8 needs elaboration, and with pseudo-code.\n\n- Why in \"Nominal\" SMA+PPO still has a higher reward, e.g. Table 1+2, Fig. 3,4. Similarly, experiment in 5.2, why when action noise is small, i.e. even equal to 0, SAM+PPO still performs better than the others, because the objectives of PPO and SAM+PPO would converge to the same one? And in 5.3, SAM+PPO has a higher return, while with variation in Friction Coefficient shows mixed results. \n\n- Joint variation of friction and mass shows quite clear that SAM+PPO is performing better than baselines, except on Walker2d-v3 with a mixed result. 
Can the authors elaborate on why, or provide an ablation to explain, the mixed performance of SAM+PPO?\n\n\n- The proof of Proposition 1 is not quite standard. The policy is sometimes referred to as a distribution, but at other times used as a deterministic mapping. It needs to be revised." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- Provides a link from flat reward to action robustness. The authors show this through both the theoretical results in Section 4 and various experimental results. The motivation for having a robust objective is good. The theoretical result seems correct.\n\n- Positive experimental results showing the benefit of optimizing for flat reward maxima. The authors show this through different experimental settings: variations to the physical properties of the underlying MDP, and visualization of the reward surface." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper explores the impact of flat minima in reinforcement learning (RL), linking flatter reward surfaces to improved model robustness. The authors show that flatter rewards lead to more consistent actions despite parameter changes, enhancing robustness against variations in state transitions and reward functions. The authors confirm through extensive experiments that flatter rewards significantly bolster RL model performance across diverse scenarios." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The performance of SAM + PPO is mixed in comparison to the baselines, e.g., some visible cases in Figs. 5c and 4b.\n\n- Ablations are not provided to understand how such an objective brings benefits in comparison to similar approaches, e.g., RNAC or robust RL." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "I'm really curious about \"flat rewards\" in general. Definitions 1 and 2 seem too strict at first glance (the equalities therein), so it is actually a bit surprising to me that they are even possible at all; however, IIUC, Fig 6 does give evidence of this. I think that these definitions can be further elaborated on (do you have a toy example where it is easy to see in parameter or action space?). Realistically, what values of $\\epsilon$ do you think are reasonable? Something like $10^{-11}$ or $10^{-2}$? (I might've missed it somewhere, sorry.) If these are novel definitions not previously given in the literature, that can be stated as a contribution of the paper. I think it can spark future work in both theory and experimental directions.\n\nHere are some follow-up questions/comments:\n\n- In Sec 5, how long are those agents trained for? Equal number of env steps for each? 
How were hparams tuned for each algo?\n- What is the agent's action scale for these environments (cf L337)? What do you do if the noise added is outside the action range?\n- Do you have any ideas about the sharp dropoff in Fig 3b for SAM PPO? it looks interesting, but I'm not sure what to make of it... is there some \"critical\" mass ratio? I.e., if we zoom in, how sharp is that transition, and have you averaged over enough random seeds?\n- you mention \"flatter reward maxima\" in L70. I think a formal definition or good visualization of this phenomenon early on would really improve the paper.\n- How does this work relate at all to other trust region methods like TRPO? How about e.g. [1]\n\n[1]: https://arxiv.org/abs/2103.06257\n\nTypos/minor\n\n- Fig 3 caption \"nomial\"\n- some missing +/- signs in Table 1 (in parens)\n- citations in sec 2 often have a missing leading space.\n- can you improve the visual in Fig 1? I think it's important but not quite capturing the essence. Maybe just to remove axes and grid and zoom in a bit: is there indeed a channel for the agent? It's hard to see\n- The introduction paragraphs have some grammatical issues. A cleanup/re-write here can help to crystallize the main message early on\n\nWith a rewrite to clean up the presentation, deeper explanation for SAM (i)-(iv), and perhaps a few more visualizations, this could be a really strong paper; but unfortunately I don't think it's quite there yet." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- This use of robustness in policy parameter space seems to be fairly new\n- The experiments demonstrate a strong performance boost across a range of perturbations \n- The visualizations in Figure 6 offer an interesting insight into the optimizations produced by PPO vs SAM+PPO. The Hopper example is quite striking. Could you elaborate on the distinction and sharp dropoffs seen there?\n- Theory provides a potential link between flatness in parameter space and action robustness\n- Provided a solid comparison wrt computational overhead / sample complexity and wall time versus other algorithms\n- Figure 5 is quite nice, I think it should be emphasized\n\nOverall, the paper seems like a nice first step in the direction of understanding the relationship between robustness in reward, policy parameter, and dynamics spaces. The notion of \"flat rewards\" is an interesting one." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work proposes a new method to ensure robustness in RL based on variations in the loss landscape: \"SAM\": Sharpness-Aware Minimization. By posing the policy optimization as a min/max objective with respect to perturbations in the parameter space, the authors show robustness to changes in reward and dynamics. A theoretical result is given, linking parameter and reward robustness, and a diverse set of experiments on 3 MuJoCo environments is provided." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "**Writing:**\n\n- Overall, I think the clarity of the paper can be enhanced with a re-write fixing grammar and overall structure:\n- It would be helpful for example, to also include some visualizations of the definitions 1 & 2.\n- Also, Proposition 1 and the following remarks are not very clear. 
As an example, for Prop1, if I understand correctly, the result would be better phrased as \"if $\\mathcal{E}$-flat, then $\\Delta$-robust, with $\\Delta \\leq ...$ otherwise the current phrasing is a bit confusing.\n\n\n**Discussion:**\n\n- The discussion of the main idea, \"SAM\" is lacking:\n- After it is introduced in Sec 3.3, the authors give a way to solve the optimization problem in Eq (3) by their steps (i)-(iv). However, (to me at least), it is not clear why this method is used. Is there prior work demonstrating the efficacy of this method? Are there experiments or maybe some minimal example illustrating the utility of this setup? E.g. why is $\\epsilon$ chosen to be in the direction of the previously computed gradient, if theoretically it should represent an arbitrary direction in the ball. \n- At the very least, can the authors provide some visual demonstration as to what is happening here in the loss landscape? Getting a better intuition would help to understand the core method of the paper. \n- Remark 1.1 seems to be a restatement of Prop 1 unless I am missing something. Could you please explain?\n- Remark 1.2 can be improved by using more technically accurate statements (i.e. what is meant by \"when a reward function slightly changes\")? What is meant by the \"direct [correspondence] to the changes of loss function in the supervised learning case\"? I think the latter is very unclear, and maybe even misleading.\n\n\n**Experiments:**\n- My only issue with the experiments (minor) is that you are missing RNAC in Table 2 (why?). Also why not compare against RARL? Missing explanation of the shaded regions in each figure caption." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024flat,\ntitle={Flat Reward in Policy Parameter Space Implies Robust Reinforcement Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4OaO3GjP7k},\nnote={under review}\n}" }, "abstract": { "value": "Investigating flat minima on loss surfaces in parameter space is well-documented in the supervised learning context, highlighting its advantages in model generalization. However, limited attention has been paid to the reinforcement learning (RL) context, where the impact of flatter reward in policy parameter space remains mostly unexplored. Beyond the naive guessing from the lesson of supervised learning, which makes us anticipate a link from flat rewards to the enhanced generalization, we here aim to formally bridge the flatness in reward surface to the robustness of RL models. For a policy model case, where the deep model determines actions, the flatter behavior of rewards against the parameter perturbations primarily leads to consistent rewards against perturbed actions. Moreover, the action robustness further affects to the robustness against other variations from the changes of state transition probabilities and reward functions. We extensively simulate various RL environments, confirming the consistent gains of flatter reward in bolstering the robustness of RL in varying circumstances, including action selection, transition dynamics, and reward functions." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Reinforcement learning", "Flat Minima", "Robust Reinforcement learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/11a15eb57bf648fedf0b0d59787bd7e338cdf1da.pdf" }, "presentation": null, "primary_area": { "value": "reinforcement learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Flat Reward in Policy Parameter Space Implies Robust Reinforcement Learning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4PlbIfmX9o
Graph Assisted Offline-Online Deep Reinforcement Learning for Dynamic Workflow Scheduling
main
Active
workflow scheduling;graph attention neural network;reinforcement learning;online learning
optimization
3;5;5;6
4;3;3;3
2;3;3;2
1;3;2;3
3;3;3;3
4.75
3.25
2.5
2.25
3
-0.927173
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Besides the concerns raised in the weakness part, I have the following additional questions:\n\n1. PPO has also been applied to solve other combinatorial optimization problems like routing problems, where the horizons are also very large. Could you give some intuitions why PPO is particularly unstable for this problem?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper is well-written and easy to follow\n2. Special designs on the actor and critic network to have more efficient embeddings and long-range interaction modeling\n3. customized gradient for stabilizing the PPO training for the online settings\n4. Experiments are conducted on many online and offline settings" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a novel Graph-assisted Offline-Online Deep Reinforcement Learning (GOODRL) approach for Dynamic Workflow Scheduling (DWS) in cloud environments. The authors introduces three main innovations: a task-specific graph representation with a Graph Attention Actor Network for focused task assignments, a system-oriented graph representation with a Graph Attention Critic Network for real-time state evaluation, and a hybrid offline-online RL method to improve adaptability. The offline stage uses imitation learning for stable initial policy development, while the online stage applies advanced PPO techniques for continuous adaptation. Experiments demonstrate GOODRL’s superiority over state-of-the-art methods in minimizing mean flowtime and enhancing performance in several offline and online settings." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "This paper presents a comprehensive learning pipeline for addressing the dynamic workflow scheduling (DWS) problem. I appreciate the authors for their efforts in adapting various components to suit the unique challenges of DWS.\n\nThe primary concern with this paper is the applicability of the proposed pipeline. Many of the modifications and design choices appear closely tailored to DWS, leaving it unclear how generalizable this approach might be to other scheduling problems, such as flexible job shop scheduling. Can these designs be readily adapted for other problem domains? The paper would be significantly strengthened by demonstrating the pipeline’s transferability to other scheduling scenarios.\n\nSeveral techniques are introduced throughout the pipeline, though not all are rigorously validated in the ablation study. 
A more thorough investigation into the contributions of each component would enhance our understanding of their individual benefits.\n\nOverall, I appreciate the contributions of this work and currently lean toward a borderline acceptance." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. In a real-world data cluster, how does the arrival pattern change over time? Consider plotting the 95%-99% quantile of the number of simultaneous arrivals. Please also consider adding more background information.\n2. How long does the model take to make a decision at inference time, compared to prior approaches?\n3. It seems that in Tables 1 and 2, the arrival rates during training and testing are always the same for each scenario. However, in a real-world data center, the arrival pattern might fluctuate over time, especially in the case of extreme events (e.g., holidays or deadlines). How robust is your approach to such distribution shifts?\n4. In Tables 1 and 2, \"Ours-Offline\" already achieves a very good performance. If, due to distribution shifts, the offline version gets a much lower performance, can the online algorithm quickly adapt to such changes?\n5. Many real-world scenarios involve some type of resource contention or performance interference. For example, if two tasks are both memory intensive, maybe we should allocate them to different machines. How does GOODRL address this issue?\n\nMinor:\n- Line 45, \"In fact existing\" --> \"In fact, existing\"" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper has many diagrams, which help the readers to understand.\n- GOODRL shows strong performance against other baselines." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Prior works often consider homogeneous setups and static conditions, and fail to consider the dynamic nature of the workflow scheduling problem. To this end, the paper proposes a GNN-based DRL approach with an offline stage as well as an online stage." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- My main concern is that the paper seems to be applying some existing algorithms to a scheduling problem. There are some simple modifications to different parts of the overall method, which end up giving good performance. However, what are the broader insights of this work?\n- Consider adding more background on the DWS problem and studies on real traces." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- How does this model differ from the model presented in Wang et al 2022 [7]?\n\n- How does the heterogeneity of the agent-task is accounted for in the graph representation?\n\n- It is unclear what the novelty of this work is compared to similar works published in Multi-Agent Coordination, and Task Allocation and Scheduling Domains." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- Clear and detailed explanation of the approach being used.\n- Easy to understand figures.\n- Compared against Multiple Benchmarks and show that the model outperforms the baselines in most instances" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents Graph assisted Offline-Online Deep Reinforcement Learning (GOODRL) for Dynamic Workflow Scheduling for Cloud Computing. The presence of heterogenous configurations, dynamic arrival of workflows, and constant evolving environment makes this a challenging problem for State of the Art Models.\n\nThe Contributions presented in the paper are:\n\n1) A Task-Specific Graph Representation and Graph Attention Actor Model that dynamically assign focused tasks to heterogenous machines.\n2) Explicit Consideration of the future impact of the crucial state.\n3) A combination of Offline Imitation Learning followed by Online PPO." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The topic of Resource Opimization using Graph Neural Networks is an open problem that has applications not limited to Cloud Computing. The problem itself is also explored under Multiple Travelling Salesman Problems [1, 2], Vehicle Routing Problem [3], Job Shop Scheduling [4] and Task Allocation and Scheduling in Multi-Agent Systems [5, 6]. While the application of the problem into Cloud Computing is novel, the use of Reinforcement Learning and Graph Attention Networks to similar optimization problems exists.\n\n- It is unclear how the proposed method differs from Online Predictive Scheduling using Heterogenous Graph Attention presented in Wang et al 2022 [7]:\n\n- The enhancement provided by the Online RL part of the model is unclear. The experimental results show that the Offline Learning allows for the model to be within 2% of the Online Training results. The significance of this improvement is unclear and needs to be discussed clearly.\n \n[1] Yujiao Hu, Yuan Yao, and Wee Sun Lee. 2020. A reinforcement learning approach for optimizing multiple traveling salesman problems over graphs. Knowledge-Based Systems 204 (Sept. 2020), 106244. https://doi.org/10.1016/j.knosys.2020. 106244\n\n[2] Yujiao Hu, Zhen Zhang, Yuan Yao, Xingpeng Huyan, Xingshe Zhou, and Wee Sun Lee. 2021. 
A bidirectional graph neural network for traveling salesman problems on arbitrary symmetric graphs. Engineering Applications of Artificial Intelligence 97 (Jan. 2021), 104061. https://doi.org/10.1016/j.engappai.2020.104061\n\n[3] Steve Paul and Souma Chowdhury. 2022. A scalable graph learning approach to capacitated vehicle routing problem using capsule networks and attention mechanism, Vol. 86236. American Society of Mechanical Engineers, V03BT03A045\n\n[4] Song, Wen, et al. \"Flexible job-shop scheduling via graph neural network and deep reinforcement learning.\" _IEEE Transactions on Industrial Informatics_ 19.2 (2022): 1600-1610.\n\n[5] Z. Wang and M. Gombolay, \"Learning Scheduling Policies for Multi-Robot Coordination With Graph Attention Networks,\" in IEEE Robotics and Automation Letters, vol. 5, no. 3, pp. 4509-4516, July 2020, doi: 10.1109/LRA.2020.3002198.\n\n[6] B. Altundas, Z. Wang, J. Bishop and M. Gombolay, \"Learning Coordination Policies over Heterogeneous Graphs for Human-Robot Teams via Recurrent Neural Schedule Propagation,\" _2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS)_, Kyoto, Japan, 2022, pp. 11679-11686, doi: 10.1109/IROS47612.2022.9981748.\n\n[7] Wang, Z., & Gombolay, M. (2022). Stochastic Resource Optimization over Heterogeneous Graph Neural Networks for Failure-Predictive Maintenance Scheduling. _Proceedings of the International Conference on Automated Planning and Scheduling_, _32_(1), 527-536. https://doi.org/10.1609/icaps.v32i1.19839" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Could the authors elaborate on how their model adapts to significant changes in workflow patterns or cloud configurations without extensive retraining? How does the method maintain robust performance under dynamic conditions that differ from the training data?\n- What are the practical deployment requirements for GOODRL in terms of computational resources, and how do they compare with simpler heuristic-based solutions in large-scale, real-time applications?\n- Can the authors provide more insights into how the method handles noisy or incomplete data, which is a common challenge in real-world cloud scheduling environments?\n- How might the proposed approach be extended or adapted to incorporate multi-objective optimization, such as balancing energy efficiency with flowtime reduction, and what specific challenges would need to be addressed to achieve this?\n- Could the authors comment on potential scalability issues when deploying GOODRL in larger cloud infrastructures or in environments with highly heterogeneous machine configurations, and how these challenges could be mitigated?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper proposes a unique combination of graph representations tailored for actor and critic networks, enhancing action differentiation and value estimation.\n- GOODRL demonstrates improvements in mean flowtime over established baseline algorithms, showcasing robust performance in both offline and online settings.\n- The offline-online learning approach with imitation learning and gradient control addresses challenges in adapting to dynamic environments, adding practical value.\n- The ablation studies and performance comparisons are thorough, providing strong evidence for the contributions and architectural decisions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces an innovative approach, GOODRL, for handling dynamic workflow scheduling in cloud computing environments. This method integrates a task-specific graph representation with Graph Attention Networks (GATs) for actor-critic networks and incorporates offline imitation learning alongside online reinforcement learning to adapt to changing conditions. The proposed system is evaluated in various online and offline scenarios and is shown to outperform existing state-of-the-art methods in terms of mean flowtime and adaptability." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper's reliance on simulations limits its generalizability to real-world cloud environments. Practical tests in real cloud data centers would bolster the validity of the results.\n- The experiments primarily focus on a specific set of workflow types and machine configurations, potentially limiting the applicability of findings to other types of DWS problems.\n- The computational overhead associated with the proposed GAT-based architectures is not discussed in detail, raising questions about deployment feasibility in large-scale, real-time applications.\n- While the method performs well in flowtime reduction, other practical objectives, such as energy efficiency and cost, are not explored, which would be valuable for broader applicability.\n- The paper lacks discussion on how the model generalizes to varied workloads, impacting its robustness in dynamic cloud environments." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "This paper proposes an offline-online DRL framework that uses novel graph representations to effectively and efficiently schedule dynamic workflows across heterogeneous machines, significantly improving flowtime compared to state-of-the-art methods." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024graph,\ntitle={Graph Assisted Offline-Online Deep Reinforcement Learning for Dynamic Workflow Scheduling},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4PlbIfmX9o},\nnote={under review}\n}" }, "abstract": { "value": "Dynamic workflow scheduling (DWS) in cloud computing presents substantial challenges due to heterogeneous machine configurations, unpredictable workflow arrivals/patterns, and constantly evolving environments. However, existing research often assumes homogeneous setups and static conditions, limiting flexibility and adaptability in real-world scenarios. 
In this paper, we propose a novel *Graph assisted Offline-Online Deep Reinforcement Learning* (GOODRL) approach to building an effective and efficient scheduling agent for DWS. Our approach features three key innovations: (1) a *task-specific* graph representation and a *Graph Attention Actor Network* that enable the agent to dynamically assign focused tasks to heterogeneous machines while explicitly considering the future impact of each machine on these tasks; (2) a *system-oriented* graph representation and a *Graph Attention Critic Network* that facilitate efficient processing of new information and understanding its impact on the current state, crucial for managing unpredictable workflow arrivals/patterns in real-time; and (3) an *offline-online* method that utilizes imitation learning for effective offline training and applies gradient control and decoupled high-frequency critic training techniques during online learning to sustain the agent’s robust performance in rapidly changing environments. Experimental results demonstrate that GOODRL significantly outperforms several state-of-the-art algorithms, achieving substantially lower mean flowtime and high adaptability in various online and offline scenarios." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "workflow scheduling", "graph attention neural network", "reinforcement learning", "online learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/8da076d1ee04d72fbd76639c6c98dceda72b2962.pdf" }, "presentation": null, "primary_area": { "value": "optimization" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Graph Assisted Offline-Online Deep Reinforcement Learning for Dynamic Workflow Scheduling" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
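The reviews above ask what the graph attention actor concretely does when it assigns a focused task to one of several heterogeneous machines. The authors' architecture is not reproduced in this record, so the following is only a minimal sketch of one plausible reading, in which task and machine embeddings (assumed to come from some upstream graph encoder) are combined by an attention block and an MLP scorer into a categorical distribution over machines; all names and layer sizes are assumptions for illustration, not GOODRL itself.

```python
import torch
import torch.nn as nn

class TaskMachineActor(nn.Module):
    """Scores candidate machines for one focused task and samples an assignment."""

    def __init__(self, dim=64, heads=4):
        super().__init__()
        # the focused task attends over all candidate machines
        self.attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.scorer = nn.Sequential(
            nn.Linear(2 * dim, dim), nn.ReLU(), nn.Linear(dim, 1))

    def forward(self, task_emb, machine_embs):
        # task_emb: (dim,)   machine_embs: (num_machines, dim)
        q = task_emb.view(1, 1, -1)
        kv = machine_embs.unsqueeze(0)
        task_ctx, _ = self.attn(q, kv, kv)                    # (1, 1, dim)
        task_ctx = task_ctx.squeeze(0).expand_as(machine_embs)
        logits = self.scorer(
            torch.cat([task_ctx, machine_embs], dim=-1)).squeeze(-1)
        dist = torch.distributions.Categorical(logits=logits)
        machine = dist.sample()           # index of the chosen machine
        return machine, dist.log_prob(machine)
```

A system-oriented critic would instead pool embeddings of the whole workflow/machine graph into a single value estimate for the current state; that component, like everything above, would have to be checked against the paper itself.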
4Po8d9GAfQ
Language Models are Hidden Reasoners: Unlocking Latent Reasoning Capabilities via Self-Rewarding
main
Active
Large language model;Optimizing LLM reasoning capabilities;Self-improvement;Reward model-free optimization;Reinforcement learning
foundation or frontier models, including LLMs
3;3;3;5;5
5;4;4;3;2
2;2;2;3;3
2;2;3;3;3
3;3;2;3;3
3.8
3.6
2.4
2.6
2.8
-0.880705
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Line 290: It is not clear about how the first gradient term in Eq. 4 would lead to optimising the LLM policy to generate higher quality rationales. Further elaboration is needed as to why generating/optimising the likelihood of input question/instruction conditioned on the rationale would lead to better reasoning.\n2. Lines 414-415: Please support with examples about why improvements on ARC-Challenge are relatively lesser. The magnitude of improvements is not at all an issue, however, it should be better demonstrated that why better reasoning chains are not leading to higher improvements. Does this make ARC-Challenge ill-suited for this study since it involves limited reasoning scope?\n3. Line 430: Why is it needed to generate rationales that are as long as 500 tokens? It would be good to show the usefulness of the information contained in the longer rationales and why does it lead to better performance before saturating." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. Enhancing the reasoning ability of LLMs without relying on feedback from any external LLM during the training phase is an important research question.\n2. Suitability of applying the variational objective is well motivated, explained and justified in Sections 3 and 4.1.\n3. Accuracy gains over the base and the SFT version of the LLMs are shown with some ablation studies on greedy decoding vs. self consistency based sampling. Further, the authors show that inference time scaling of number of reasoning samples obtained using LaTRO can additionally enhance the accuracy." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The work focuses on enhancing the reasoning abilities of large language models (LLMs) during the training phase without relying on external feedback. The authors motivate the work by raising an important question on improving the reasoning ability of LLMs during training phase since most prior works have focussed at achieving this at inference time. To do so, the authors propose to sample diverse reasoning rationales from latent distribution and optimize the LLM through a variational framework using the sampled rationales. The intuition behind application of the variational framework is well motivated through the objective of self-consistency based chain-of-thought prompting. Further, the work proposes to usethe likelihood of generating correct answer conditioned on a rationale as a proxy to explicit reward models to optimise the LLM towards generating better reasoning chains. Results demonstrate that the proposed LaTRO method helps in improving the accuracy achieved for multiple LLMs such as Mistral-7B, Llama-3.1-8B etc. 
on GSM8K and ARC-Challenge datasets compared to the corresponding base and the SFT versions of the LLMs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Presentation of the introduction section of the paper needs improvement - It should better motivate the need behind reducing the reliance on external LLMs/feedback for improving a given LLM. Further, it should provide information about why the variational framework is suitable and elaborate some details about the proposed method in the introduction itself.\n2. Discussion on related work is very limited. Even though the proposed method is an effort to reduce reliance on external LLMs, it should still discuss and contrast against existing methods that leverage external LLMs (for example, [1,2]) as well as employ self-play based techniques to improve an LLM by itself with requiring help from any other LLM (for example, [3, 4]). The example references are only indicative (but not exhaustive) of the type of citations that should be included.\n3. Lack of appropriate baselines - Even though the work claims and focusses at improving the base/SFT version of the LLM through the proposed LaTRO framework, it should still compare against some of the existing self-play trainable methods (by adapting them for 7B LLMs) as baselines (eg. [3] - on datasets where the ground-truth rationale is available and [4]). Such methods improve the rationales generated by an LLM by exploring the generation space and discriminating better rationales from the inferior ones.\n4. More reasoning datasets such as CSQA, Hellaswag etc. should be considered for evaluation studies.\n\n[1] Distilling Step-by-Step! Outperforming Larger Language Models with Less Training Data and Smaller Model Sizes. Cheng-Yu Hsieh, Chun-Liang Li, Chih-kuan Yeh, Hootan Nakhost, Yasuhisa Fujii, Alex Ratner, Ranjay Krishna, Chen-Yu Lee, and Tomas Pfister. In Findings of the Association for Computational Linguistics: ACL 2023, pages 8003–8017, Toronto, Canada. Association for Computational Linguistics.\n\n[2] Zephyr: Direct Distillation of LM Alignment. Lewis Tunstall and Edward Emanuel Beeching and Nathan Lambert and Nazneen Rajani and Kashif Rasul and Younes Belkada and Shengyi Huang and Leandro Von Werra and Clementine Fourrier and Nathan Habib and Nathan Sarrazin and Omar Sanseviero and Alexander M Rush and Thomas Wolf. First Conference on Language Modeling, 2024.\n\n[3] Self-Play Fine-Tuning Converts Weak Language Models to Strong Language Models. Zixiang Chen and Yihe Deng and Huizhuo Yuan and Kaixuan Ji and Quanquan Gu. ICML 2024.\n\n[4] Self-Rewarding Language Models. Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Xian Li, Sainbayar Sukhbaatar, Jing Xu, Jason Weston" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "see weakness" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The writing is clear and easy to follow.\n- The discussed topic and motivation are both innovative and significant." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposesLaTRO, a novel framework aimed at enhancing the reasoning capabilities of LLMs. LaTRO addresses the challenge of improving reasoning during the training phase by formulating reasoning as sampling from a latent distribution and optimizing it through variational approaches. This method allows LLMs to self-improve their reasoning process and ability to evaluate the quality of reasoning without external feedback or reward models. The paper validates LaTRO's effectiveness through experiments on GSM8K and ARC-Challenge datasets, demonstrating significant improvements over base models and supervised fine-tuning approaches." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Although I'm not familiar with the topic discussed in this article, I believe the experiments presented are too few and not comprehensive enough. Moreover, the datasets considered are limited to only GSM8K and ARC-Challenge, which lacks persuasiveness.\n- The number of case study examples is too limited, with only a few instances in Figure 4 and the appendix, which is not convincing.\n- The proposed method, LaTRO, especially the stability of the Self-reward component, has not been adequately considered." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. In Proposition 1, $p(y|x) = \\int p(y|z,x) p(z|x) dz$ holds for any CoT-based method. Why, then, is CoT-SC introduced here?\n\n2. What is the definition of \"golden rationales\", and why can’t ARC-Challenge have golden rationales?\n\n3. What can the experimental results on ARC-Challenge demonstrate? The two baselines are too weak, as the SFT baseline did not utilize rationales." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper offers a novel perspective by framing reasoning as a process of sampling from a latent distribution and addressing it through variational methods.\n\n2. The paper leverages the model's own probability estimates as an implicit reward, unifying the training of the policy and reward models.\n\n3. The paper is well-organized and easy to follow." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces LaTRO, a novel approach that formulates Chain-of-Thought (CoT) reasoning as sampling from a latent distribution, optimized through variational techniques. By leveraging the probability of generating the correct answer as an implicit reward, LaTRO unifies the learning of both the policy and reward models, allowing large language models to refine reasoning paths in a self-rewarding manner. The authors demonstrate LaTRO’s effectiveness through experiments on the GSM8K and ARC-Challenge datasets across various model architectures. Their results indicate that latent reasoning capabilities within pre-trained language models can be unlocked and enhanced using this self-improvement framework." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The experimental setup lacks sufficient strong baselines, which are essential for a robust evaluation. Two key baselines to consider are:\n - A stronger SFT Baseline: Fine-tuning the policy model with correct reasoning paths. Given the availability of ground truth answers, multiple reasoning paths could be sampled, retaining only those that align with the ground truth. This baseline would provide a more rigorous comparison for evaluating LaTRO’s effectiveness in reasoning.\n - DPO Baseline: The authors could further fine-tune the policy model using the DPO algorithm, incorporating both correct and incorrect reasoning paths. Actually, the DPO algorithm aligns closely with LaTRO in its approach, as both methods aim to avoid training an explicit reward model. Including DPO as a baseline would highlight LaTRO’s strengths relative to an approach that similarly leverages implicit reward mechanisms.\n\n2. The experimental scope is limited, as only two datasets and small models were tested. \n - Expanding the experiments to include a wider range of reasoning datasets would better assess the model's reasoning capabilities. Standard practice for evaluating reasoning in large language models includes diverse datasets that cover arithmetic reasoning (only GSM8K is not enough for arithmetic reasoning evaluation), commonsense reasoning, symbolic reasoning, and other reasoning types. Incorporating these would provide a more comprehensive evaluation.\n - Testing across varying model scales, especially with larger models, could provide insights into how the approach scales with model size and whether larger models yield better reasoning performance.\n\n3. Although the authors claim that training did not rely on external feedback, the ground truth answers effectively serve as a form of implicit external feedback or reward." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. About self-evaluation task in LaTRO: how does LaTRO evaluate the probability of each reasoning path producing the correct answer? 
What does the conditional probability mentioned in Section 4.1 mean? Is there a task restriction for this evaluation method? What is the accuracy of self-evaluation? The authors did not discuss these questions in the paper, nor did they explore them in depth in the experiments.\n\n2. The authors only used a formula to explain how self-reward signals are used to achieve parameter updates, so what exactly do these parameter updates refer to during the training phase? How is it implemented? I suggest providing more detailed information." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. LaTRO regards the reasoning process as sampling from a latent distribution and optimizes it using a variational method. This approach is different from prompt-based methods such as CoT and is closer to unsupervised learning. Besides, the feasibility of LaTRO is verified by mathematical proof.\n\n2. This paper focuses on a very interesting topic: enabling an LLM to improve itself through a self-reward mechanism without external supervision and feedback signals." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces LaTRO, a framework that enhances LLMs' reasoning abilities by treating reasoning as sampling from a latent distribution and optimizing it with variational methods. LaTRO allows LLMs to improve their reasoning process and their evaluation of reasoning quality simultaneously, without external feedback. Experiments on the GSM8K and ARC-Challenge datasets demonstrate the effectiveness of LaTRO compared with the SFT training method." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. I request a step-by-step description or flowchart of the LaTRO pipeline. Although a large number of formulas are used in this paper to explain each part, it lacks a detailed description of the proposed method, which makes it difficult for readers to understand the complete pipeline and reproduce it. \n\n2. As far as I know, there are some works that gradually enhance the capabilities of LLMs (not limited to reasoning tasks), including prompt-based [1][2][3] and training-based methods [4][5][6], some of which do not use any feedback information to enhance the capabilities of the LLM [7][8]. The author should discuss the differences between this work and these works and compare their performance. It is necessary to add a dedicated subsection in the Related Work section discussing these specific works and their methods. If possible, include performance comparisons with these methods in the experimental section.\n\n[1] When can llms actually correct their own mistakes? A critical survey of self-correction of llms\n\n[2] Learning From Mistakes Makes LLM Better Reasoner\n\n[3] Mirror: A Multiple-perspective Self-Reflection Method for Knowledge-rich Reasoning\n\n[4] REFINER: Reasoning Feedback on Intermediate Representations\n\n[5] CRYSTAL: Introspective Reasoners Reinforced with Self-Feedback\n\n[6] SELF: Language-driven Self-evolution for Large Language model\n\n[7] Small language models can self-correct\n\n[8] Think Thrice Before You Act: Progressive Thought Refinement in Large Language Models\n \n3. 
In the experiments, the authors only employ the SFT training method and the base model as baselines, without comparing the performance with CoT-SC mentioned in Section 3 and the classic self-improvement works mentioned above, making it difficult to demonstrate the advantages of the proposed LaTRO. So, please provide a more comprehensive analysis of how LaTRO performs relative to these additional baselines across different tasks and metrics." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See \"Weaknesses\"." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The structure of this paper is clear, and it's easy to follow the main idea and the contribution.\n2. The research problem this paper targets is very important to the community.\n3. The motivation is sound and the benefits of the approach are clear." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a principled framework, LaTRO, to treat the reasoning process as sampling from a latent distribution and enable LLMs themselves as reward models to evaluate the quality of reasoning rationales. The proposed LaTRO outperforms supervised fine-tuning baselines on 2 datasets." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Although modeling the rationales as latent variables to sample is well defined and proposed, the paper lacks a discussion of previously proposed reasoning methods that also formulate the reasoning process as latent variables [1, 2], even though they are cited in the related work. Specifically, [1] proposes to sample diverse reasoning rationales to improve the prediction performance and also proposes an EM-like algorithm to improve the reward model and LLM alternately. It would be great to have a discussion and comparison with these methods.\n2. The paper does not compare with various prompting-based reasoning baselines mentioned in the related work section, such as tree-of-thought [3] and RAP [4], as well as fine-tuning baselines such as STaR [5], which is a missed opportunity to demonstrate its effectiveness. It would be better to compare them with metrics like training / inference computational cost and accuracy.\n3. The paper does not provide a confidence interval, making it unclear how robust the proposed LaTRO is to initialization and randomness. It would be great to report results from at least 3 repetitions.\n4. As the proposed LaTRO is a general approach, it would be great to evaluate it on more benchmark datasets to verify its effectiveness, such as HumanEval [6] and MBPP [7], which are popular in the current LLM reasoning community.\n\n\n[1] Hu, Edward J., et al. 
\"Amortizing intractable inference in large language models.\" arXiv preprint arXiv:2310.04363 (2023).\n\n[2] Hoffman, Matthew Douglas, et al. \"Training chain-of-thought via latent-variable inference.\" Advances in Neural Information Processing Systems 36 (2024).\n\n[3] Yao, Shunyu, et al. \"Tree of thoughts: Deliberate problem solving with large language models.\" Advances in Neural Information Processing Systems 36 (2024).\n\n[4] Hao, Shibo, et al. \"Reasoning with language model is planning with world model.\" arXiv preprint arXiv:2305.14992 (2023).\n\n[5] Zelikman, Eric, et al. \"Star: Bootstrapping reasoning with reasoning.\" Advances in Neural Information Processing Systems 35 (2022): 15476-15488.\n\n[6] Chen, Mark, et al. \"Evaluating large language models trained on code.\" arXiv preprint arXiv:2107.03374 (2021).\n\n[7] Austin, Jacob, et al. \"Program synthesis with large language models.\" arXiv preprint arXiv:2108.07732 (2021)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024language,\ntitle={Language Models are Hidden Reasoners: Unlocking Latent Reasoning Capabilities via Self-Rewarding},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4Po8d9GAfQ},\nnote={under review}\n}" }, "abstract": { "value": "Large language models (LLMs) have shown impressive capabilities, but still struggle with complex reasoning tasks requiring multiple steps. While prompt-based methods like Chain-of-Thought (CoT) can improve LLM reasoning at inference time, optimizing reasoning capabilities during training remains challenging. We introduce LaTent Reasoning Optimization (LaTRO), a principled framework that formulates reasoning as sampling from a latent distribution and optimizes it via variational approaches. LaTRO enables LLMs to concurrently improve both their reasoning process and ability to evaluate reasoning quality, without requiring external feedback or reward models. We validate LaTRO through experiments on GSM8K and ARC-Challenge datasets using multiple model architectures. On GSM8K, LaTRO improves zero-shot accuracy by an average of 12.5\\% over base models and 9.6\\% over supervised fine-tuning across Phi-3.5-mini, Mistral-7B, and Llama-3.1-8B. Our findings suggest that pre-trained LLMs possess latent reasoning capabilities that can be unlocked and enhanced through our proposed optimization approach in a self-improvement manner." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Large language model", "Optimizing LLM reasoning capabilities", "Self-improvement", "Reward model-free optimization", "Reinforcement learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/a01dd007cfd17c70238397405c1e340140150e36.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Language Models are Hidden Reasoners: Unlocking Latent Reasoning Capabilities via Self-Rewarding" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4QVgnxXVDB
3CIL: Causality-Inspired Contrastive Conditional Imitation Learning for Autonomous Driving
main
Active
Imitation Learning;Autonomous Driving;Causal Reasoning;Causal Confusion
applications to robotics, autonomy, planning
3;5;6;6
4;5;4;4
2;2;4;3
2;3;3;3
2;2;3;3
5
4.25
2.75
2.75
2.5
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "NA" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "My main concern is that there are lots of assumptions made in this paper that are unsupported by evidence or experiments. I would change my opinion if the authors present more ablation experiments that carefully study each of their design decisions. I would also love to see more qualitative examples (instead of just descriptions). Lastly, the authors should give more details when comparing the baselines. Are the performance gains simply caused by a better network architecture or a bigger network capacity?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "* The authors do a good job summarizing previous findings about causal confusion problems in self-driving tasks\n* Proposes many interesting strategies to potentially solve or alleviate the causal confusion problems" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors present a new imitation learning algorithm that aims to solve some causal confusion problems in previous imitation learning methods on self-driving tasks. Specifically, the authors propose to 1) learn a more representative state representation; 2) reduce the chance of learning spurious correlations by inferring delta actions from latent states, and 3) weight training samples by the discrepancy between prediction and ground truth." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* The authors make many assumptions:\n * It's not sufficient to directly map o_t to a_t\n * This remains an untested hypothesis \n * learning a decoder for \\hat{s}_t helps it match the expert s_t\n * on the contrary, learning a decoder for \\hat{s}_t could force the encoder to focus on every detail in the image, even the ones that do not directly contribute to the ground-truth s_t.\n * Since \\Delta a_t is inferred from s_t, it doesn't learn the spurious correlation\n * This assumption can be wrong since s_t would contain information from a_{t-1}\n * The proposed method is better than baselines in most scenarios, but is that because of the design choices or just better models or bigger capacities?\n* Figure 2 could have more annotations\n * It would be better if the authors could annotate the different colors and shapes of each node" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "The improvements in those scenarios look promising. But how can the authors demonstrate that the gains come from reducing the confounding factors, as argued in the previous sections?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Formulates the imitation learning problem from a causal perspective and tries to prevent confounding factors using representation learning.\n- Proposes to use supervised contrastive learning to learn an image representation that aligns with expert actions.\n- The improvements in the tested scenarios are promising." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes to solve the causal confusion problem in imitation learning using supervised contrastive learning, residual prediction and sample weighting. It draws insights from causality that motivate learning a representation of history observations without spurious correlations.\nExperiments on CARLA show solid improvements over baselines, such as CIL and Premier-TACO." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Lack of comparisons with some related work, such as Wen et al., Key-frame focused visual imitation learning, which proposes a weighting strategy based on action predictability.\n- It would be nice to have more quantitative and qualitative analysis of the improvement. Can we attribute the gains to reduced spurious correlations?\n- Lack of evaluation on the standard CARLA benchmark rather than only self-constructed scenarios." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to the Weaknesses part." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "The proposed framework is well-motivated.\n\nThe experimental result looks good." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "To deal with the causal confusion problem in imitation learning, the authors take inspiration from causal learning and propose the 3CIL framework, which integrates contrastive learning, action residual prediction, and importance weighting techniques. Testing on the autonomous driving benchmark CARLA, 3CIL achieves good performance with a higher success rate and fewer collisions than the baselines." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.\tFor importance weighting, would you please intuitively or theoretically explain the motivation for using the errors of action residual prediction as weights, instead of other choices, e.g., AdaBoost or Keyframe? Or would you please compare with them?\n\n2.\tWhy is it necessary to divide the imitation process into two separate stages? I suggest comparing with training the representation modules and the policy end-to-end.\n\n3.\tIn Table 1, why is the performance of 3CIL in scenario 3 the worst across all scenarios? It is even worse than in the unseen ones, i.e., 5 and 6.\n\n4.\tMore ablation studies, analysis experiments, and visualizations are necessary. The current experimental results only contain the comparison with baselines and two simple ablation studies. I suggest having more experiments and visualizations to verify your arguments that 3CIL successfully removes spurious correlations and that importance weighting correctly identifies rare scenarios.\n\n5.\tMissing some important references [2,3,4].\n\n\n[1] Keyframe-focused visual imitation learning\n\n[2] Chauffeurnet: Learning to drive by imitating the best and synthesizing the worst\n\n[3] Fighting Fire with Fire: Avoiding DNN Shortcuts through Priming\n\n[4] Shaking the foundations: delusions in sequence models for interaction and control" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "### **Some high-level questions**\n1. In \"3CIL: Causality-Inspired Contrastive Conditional Imitation Learning,\" the abbreviation \"CIL\" is first defined in line 162. However, it’s unclear what the \"C\" stands for. Is it \"Causality\" or \"Conditional\"? Could you clarify this term for consistency?\n\n2. Does the input for the VAE include all the past history observations $o_{1:t}$?\n\n3. Some important details are in the appendix. The authors should consider moving them to the main text, e.g., Fig. 4 and Fig. 5. \n\n### **Some questions about theoretical guarantee**\n1. As discussed in (Ruan et al., 2022; Ruan & Di, 2022; Kumor et al., 2021), unobserved confounders (UCs) often complicate causal identifiability, and certain variables must be considered to mitigate the influence of spurious correlations or shortcuts during the learning process. In your approach, which specific variables are crucial to achieving this objective? E.g., which variables are required to block all the backdoor paths.\n\n2. Following up on the previous question, suppose all the past ground-truth states $s_{1:t}$ are unobserved (common in POMDPs) and $a_{t-1} \leftarrow s_{t-1} \rightarrow s_{t} \rightarrow a_{t}$ is active; in other words, there is an active backdoor path between $a_{t-1}$ and $a_{t}$ which can never be blocked. Given that this path cannot be blocked, is the policy learning process identifiable or robust? 
How do you ensure convergence in policy learning under these circumstances? \n\n3. The idea of training a representation model $G$ is not that novel. In particular, when $\hat{s}_{t}$ is unobserved, it is very hard to directly determine whether the representation model is good or not. While simulations may provide insights, evaluating the model’s practical effectiveness in real-world conditions can be significantly harder. What methods do you suggest to compare model performance outside of a simulation environment?\n\n4. The proposed method appears to have a pipeline structure (i.e., representation model + policy model).\n \n 4.1 If the overall performance is not as expected, what strategy would you recommend to isolate and improve the specific component responsible? How can one effectively determine whether limitations stem from the representation model or from the policy?\n \n 4.2 Will there be any cascading errors from upstream to downstream tasks? Could you elaborate on any mechanisms in place to mitigate such cascading errors?\n\n### **Some questions about experiments**\n\n1. In your experiments, does the imitator have access to the reward $R$? Additionally, are the expert demonstrations generated from RL algorithms that use the same reward function $R$? \n\n2. For the observations, the expert is able to observe $s_{t}$, but the imitator is only able to observe $o_{t}$. Is that correct?\n\n3. To what extent does the reward $R$ reflect real-world driving behaviors? How accurately does it capture the dynamics observed in actual driving scenarios?\n\n4. Could the authors add more details to the reward $R$? Specifically, how are the four components $r$ defined? If the primary objective is to evaluate route adherence, is $r_{position}$ alone sufficient, or are the other rewards essential? Please clarify the role of each reward component.\n\n5. Additional metrics could enhance the evaluation process, such as the RMSE between predicted positions and target routes. Would the authors consider including these metrics for a more comprehensive analysis?\n\n**Should the authors address these questions thoroughly, I would consider raising my evaluation score.**\n\n---------\n\nReferences:\n- Pearl, Judea. Causality. Cambridge University Press, 2009.\n- Peters, Jonas, Dominik Janzing, and Bernhard Schölkopf. Elements of causal inference: foundations and learning algorithms. The MIT Press, 2017.\n- Ruan, Kangrui, and Xuan Di. \"Learning human driving behaviors with sequential causal imitation learning.\" Proceedings of the AAAI Conference on Artificial Intelligence. Vol. 36. No. 4. 2022.\n- Ruan, Kangrui, et al. \"Causal imitation learning via inverse reinforcement learning.\" The Eleventh International Conference on Learning Representations. 2023.\n- Kumor, Daniel, Junzhe Zhang, and Elias Bareinboim. \"Sequential causal imitation learning with unobserved confounders.\" Advances in Neural Information Processing Systems 34 (2021): 14669-14680." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The idea of combining causality, contrastive learning and conditional imitation learning is quite interesting. The performance seems to be good for all scenarios. Fig. 1 is intuitive." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose to combine causal reasoning techniques to assist imitation for autonomous driving. Specifically, the paper presents a novel approach, causality-inspired contrastive conditional imitation learning (3CIL), which integrates contrastive learning and action residual prediction. The framework is based on POMDP, trying to mimic the scenarios when the expert and imitator share different views." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "My major concerns lie in theoretical clarity, experimental design, and practical applicability. In the absence of strong theoretical guarantees, the paper would benefit greatly from robust experimental results. Additionally, it’s crucial to provide a clear rationale for the integration of causality, contrastive learning, and conditional imitation learning, explaining why this combination is necessary. I have outlined specific questions below." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024cil,\ntitle={3{CIL}: Causality-Inspired Contrastive Conditional Imitation Learning for Autonomous Driving},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4QVgnxXVDB},\nnote={under review}\n}" }, "abstract": { "value": "Imitation learning (IL) aims to recover an expert's strategy by performing supervised learning on the demonstration datasets. Incorporating IL in safety-crucial tasks like autonomous driving is promising as it requires less interaction with the actual environment than reinforcement learning approaches. However, the robustness of IL methods is often questioned, as phenomena like causal confusion occur frequently and hinder it from practical use. In this paper, we conduct causal reasoning to investigate the crucial requirements for the ideal imitation generalization performance. With insights derived from modeled causalities, we propose causality-inspired contrastive conditional imitation learning (3CIL), a conditional imitation learning method equipped with contrastive learning and action residual prediction tasks, regularizing the imitator in causal and anti-causal directions. To mitigate the divergence with experts in unfamiliar scenarios, 3CIL introduces a sample-weighting term that transforms the prediction error into an emphasis on critical samples. Extensive experiments in the CARLA simulator show the proposed method significantly improves the driving capabilities of models." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Imitation Learning", "Autonomous Driving", "Causal Reasoning", "Causal Confusion" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/56180408d355331d3fa6a181b1ef6d6746b86aea.pdf" }, "presentation": null, "primary_area": { "value": "applications to robotics, autonomy, planning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "3CIL: Causality-Inspired Contrastive Conditional Imitation Learning for Autonomous Driving" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4QWPCTLq20
IntelLLM: Little Hints Make a Big Difference for LLM KV Cache Compression
main
Active
LLM;KV cache compression;CGE;RGL
foundation or frontier models, including LLMs
3;3;3;3;3
4;4;3;4;4
2;1;1;2;2
3;2;1;2;2
2;1;1;3;3
3
3.8
1.6
2
2
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The paper addresses an important research problem, KV cache optimization for LLM inference, and proposes two interesting techniques: center of gravity eviction (CGE) and remote gap localization (RGL).\n\n- Empirical results are competitive with prior work and baseline models." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper develops a KV-cache compression technique called IntelLLM to optimize inference in LLMs on long-text tasks. IntelLLM consists of two cache eviction strategies: center of gravity eviction (CGE) and remote gap localization (RGL). CGE mitigates the domain semantic imbalance by redirecting attention away from the center of gravity attention (cluster of important KVs). RGL solves the issue of time span vanishing caused by cache compression by assigning cache position values to distant KVs. Empirical evaluation shows IntelLLM saves KV cache memory by 50% with similar performance on long-text tasks compared to the baseline." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Using attention sparsity to compress the KV cache is not new. Two ICLR 2024 papers, StreamingLLM (https://openreview.net/forum?id=NG7sS51zVF) and FastGen (https://openreview.net/forum?id=uNrFpDPMyo), both observe attention patterns and use them to compress the KV cache.\n\n- Missing important work in both related work and baseline comparison. The paper does compare with StreamingLLM, but does not discuss it in the related work. In fact, the paper misses many important prior KV cache works:\n(1) Model Tells You What to Discard: Adaptive KV Cache Compression for LLMs, ICLR 2024, https://openreview.net/forum?id=uNrFpDPMyo\n(2) SnapKV: LLM Knows What You are Looking for Before Generation, https://arxiv.org/abs/2404.14469\n(3) XC-Cache: Cross-Attending to Cached Context for Efficient LLM Inference, https://arxiv.org/abs/2404.15420\n(4) Layer-Condensed KV Cache for Efficient Inference of Large Language Models, https://arxiv.org/abs/2405.10637 \n(5) PyramidInfer: Pyramid KV Cache Compression for High-throughput LLM Inference, https://arxiv.org/abs/2405.12532\n(6) PyramidKV: Dynamic KV Cache Compression based on Pyramidal Information Funneling, https://arxiv.org/abs/2406.02069 \n\n\n- Many writing sections are unclear. \n(1) The introduction (line 55) says prior work has two limitations; does IntelLLM have these two limitations too? What is the conflict between increased computational overhead and memory optimization? Line 83 says \"significant\"; how much memory is saved? Any speed gains? 
\n(2) The analysis in Section 3.1 is weak, without any evidence or citations, and the two conclusions are not convincing either. For example, line 172 says they compromise the robustness of the attention distribution. What is the robustness of attention in the first place? And why would the covariates compromise this? Line 180 says the sliding window fails to reason effectively about long texts; is there any evidence or citation? Line 183 says they contribute to the collapse of the LLM; again, no evidence or justification. \n(3) The two theorems in Sections 3.2 and 3.3 are not theorems, and the paper provides no proof.\n\n- Evaluation is weak and flawed. Table 1 (line 399) presents the results of IntelLLM on LongBench, but it is unclear why the window size is 4K for IntelLLM and 8K for others. There is no side-by-side efficiency comparison either. Line 443 says the latency increases by 2.63% while memory is reduced by 50%; is it the case that IntelLLM always saves 50% memory? There is no ablation study on that." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "As weakness" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The combination of CGE and RGL provides a novel solution to the KV cache memory challenge, enhancing memory efficiency without fine-tuning or substantial performance loss. The paper presents a strong theoretical basis for its methods, including insights into the sparsity of attention weights and the impact of key tokens, strengthening the validity of the proposed strategies. IntelLLM is easy to integrate into existing LLM frameworks, as it requires minimal modifications, making it highly practical for real-world deployment, especially in resource-constrained environments. The extensive benchmarking on LongBench with models like Llama-3-8B-Instruct and Mistral-7B demonstrates IntelLLM's efficiency and adaptability across diverse tasks, validating the approach. Achieving 50% KV cache reduction with a negligible increase in latency is a noteworthy achievement, making IntelLLM suitable for long-text inference tasks in various settings." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces IntelLLM, a framework that aims to optimize key-value (KV) cache compression for large language models (LLMs) without compromising performance. It addresses the challenge of high memory consumption during long-sequence inference by using two innovative techniques: Center of Gravity Eviction (CGE) and Remote Gap Localization (RGL). CGE prioritizes important tokens in attention mechanisms to ensure efficient memory use, while RGL preserves essential long-range dependencies using positional features. 
These strategies enable significant memory savings, reducing KV cache usage by 50%, with only a minimal impact on inference latency. The authors demonstrate IntelLLM's effectiveness through comprehensive experiments, achieving performance comparable to or better than full KV models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Sparse attention is already well explored in several previous works, such as [1][2], which weakens the novelty of this work. H2O [3] has already thoroughly explored the use of a sliding window.\n2. Lack of baselines (i.e., H2O [3], SnapKV [4], PyramidKV [5])\n3. An evaluation on Needle-in-a-Haystack is required to help illustrate the motivation of maintaining long-range dependencies\n4. CGE is quite close to previous methods like H2O [3] and SnapKV [4]. I can only see limited novelty in this method.\n\n[1] https://arxiv.org/abs/2402.17762\n[2] https://arxiv.org/pdf/2309.17453\n[3] https://arxiv.org/abs/2306.14048\n[4] https://arxiv.org/abs/2404.14469\n[5] https://arxiv.org/abs/2406.02069" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. What is the rationale behind choosing StreamingLLM and InfiniteLLM as KV cache compression baselines?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "**Significance:** The paper claims 50% KV cache compression without a significant drop in performance, outperforming other KV cache compression methods in a majority of datasets in LongBench and outperforming full KV cache in some datasets. The method does not require fine-tuning, making it easy to apply to existing LLMs." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper aims to further the Pareto frontier of KV-cache compression rate and performance. By employing strategic eviction, the method leverages the observation that only a small subset of tokens in long texts captures the majority of attention weights." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. There does not seem to be a rigorous proof or set of empirical observations to substantiate the theorems proposed in Sections 3.2 and 3.3.\n2. There is no discussion on the method used to choose \"k\" – the number of top keys to be treated as \"centers of gravity\".\n3. Implementation details are not provided, making it hard to reproduce the results.\n4. There does not seem to be an ablation study to evaluate how the method performs when only either CGE or RGL is used.\n5. There is no discussion on specific deployment environments where a 50% memory saving will enable new use cases, or how this method can be combined with other methods to further increase memory savings.\n6. 
The experiments only provide two KV cache compression methods as baselines, leaving out other KV cache compression methods that do not require fine-tuning, such as static prefix caching, paged attention, or radix attention. Additionally, the experiments do not compare with other approaches that do not involve KV cache compression.\n7. The best-performing methods are not clearly marked in the experimental result tables.\n8. The explanations and visualizations of CGE and RGL are unclear." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "There is no reference to Table 2; why are the experiments different on Mistral and Llama?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The problem investigated in the paper, KV cache compression, is crucial for long-context generation." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents IntelLLM, a method for compressing the key-value (KV) cache in Large Language Models (LLMs) to address memory constraints in long-sequence processing. Drawing on the sparsity of attention mechanisms, IntelLLM focuses on retaining only essential tokens, significantly reducing the KV cache size without compromising model performance. The proposed approach combines two strategies: center of gravity eviction (CGE), which prioritizes important tokens to preserve key semantic information, and remote gap localization (RGL), which maintains long-range dependencies using positional features. IntelLLM can integrate smoothly with existing LLMs, requiring minimal modifications and no fine-tuning. Experimental evaluations show IntelLLM achieves performance close to StreamingLLM while halving KV cache requirements." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "a.\tThe paper lacks substantial references from the past year, missing important studies like SnapKV [1], FastGen [2], H2O [3], PyramidKV [4], etc.\nb.\tThe observation that only a limited subset of tokens is critical for long-context generation has been extensively discussed in these and other recent works, which should be cited to provide a more comprehensive background.\nc.\tThe experimental baseline used in the study is relatively weak; including stronger baselines from the above-mentioned works would enhance the robustness of the comparative analysis and strengthen the validity of the results.\nd.\tIn Section 3, the two presented \"theorems\" are more accurately findings, as no formal proofs are provided to substantiate these claims.\n\n[1] SnapKV: LLM Knows What You are Looking for Before Generation\n[2] Model Tells You What to Discard: Adaptive KV Cache Compression for LLMs\n[3] H2O: Heavy-Hitter Oracle for Efficient Generative Inference of Large Language Models\n[4] PyramidKV: Dynamic KV Cache Compression based on Pyramidal Information Funneling" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Suggestions on improving the overall presentation. First, some notation is used without definition, e.g. $n, m, l_{head}, l_{tail}$ in Algorithm 1, and the operation $[:]$ (which dimension is it applied to?) used in Algorithm 1. Second, terms such as \"Center of Gravity\" should be defined formally in the context of KV cache eviction. Third, there remain some minor grammar and phrasing mistakes in this paper, such as the misuse of citation format. Fourth, the presentation of the tables and figures should be improved. The average score should be reported in Tables 1 & 2. The meaning of the x-axis and y-axis of Figures 1 & 2 should be clarified.\n\n2. The description of Algorithm 1 is confusing. Could you provide a very detailed explanation of the calculation process in natural language? \n\n3. There isn't any model named \"Llama3-7B\". Do you mean Llama-3-8B?\n\n4. What are the detailed settings of the baselines, e.g. how much KV cache is retained? Could you provide more information on the hyperparameters used for the baselines?\n\n5. The experimental results mentioned in lines 316-319 should be included in this paper if such a conclusion is drawn. If there is no space, they should be placed in the Appendix. The experiments in lines 359-361 should also be included.\n\nOverall, I really like the idea proposed by this paper, which I personally find very inspiring. I will raise my rating on soundness and my overall rating if a good discussion with the necessary experimental results is provided during the rebuttal phase." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. This paper provides a clear explanation of why the attention mechanism tends to allocate excessive weights to a few tokens with high attention scores while neglecting others. The analysis of the softmax function is insightful and well-reasoned.\n\n2. The proposed method, IntelLLM, demonstrates strong performance in benchmarks and shows compatibility across multiple models, including widely-used ones like Llama and Mistral.\n\n3. The method is simple and easy to implement. IntelLLM can be integrated into various scenarios and inference frameworks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents IntelLLM, a novel KV cache eviction algorithm designed to alleviate the storage and computational burden of Transformer-based large language model inference. IntelLLM leverages the sparsity of attention mechanisms and strategically evicts certain tokens to improve inference efficiency. The authors propose two key strategies: Center of Gravity Eviction (CGE) and Remote Gap Localization (RGL). CGE addresses the semantic loss caused by dominant attention scores in softmax. RGL reorganizes token positions by creating a large gap between global and local tokens, further enhancing processing efficiency. IntelLLM is evaluated on LongBench with two models, Llama-3-8B and Mistral-7B-Instruct-v0.2, and outperforms all the baselines, including Full KV cache, StreamingLLM, and LM-Infinite. The algorithm achieves a 2x KV cache compression ratio for Llama and an 8x compression ratio for Mistral, maintaining strong inference efficiency despite the reduced cache size. Additionally, the paper includes ablation studies focusing on Head Gravity and the RGL Gap, demonstrating the soundness and effectiveness of IntelLLM's design." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The presentation is to some extent unclear and confusing. Some notations and terminologies are used without a clear definition. The description of Algorithm 1 is confusing and hard to understand. Tables and figures could be organized in a better format. Details of presentation issues are described in question Q1.\n\n2. Insufficient baseline selection. The proposed algorithm is an eviction algorithm that compresses the KV cache; thus, there should be a comparison with other popular eviction methods, including H2O [1], SnapKV [2], SirLLM [3] and IntervalLLM (a baseline created in SirLLM). It would be better to compare IntelLLM with other end-to-end methods, including InfLLM [4] and MInference [5], as they utilize attention sparsity. (This is only a suggestion and the authors are not required to test all the methods above during the review process, but further discussion is expected.)\n\n3. Insufficient benchmark datasets. Although LongBench is a classic benchmark for long-context inference, the average length is relatively short and the hardness is limited. It is expected to benchmark IntelLLM on harder benchmarks, e.g. L-Eval [6], and longer benchmarks, e.g. RULER [7] or InfiniteBench [8]. It would also be better to test IntelLLM on accurate context retrieval tasks, e.g. Ret.PassKey in InfiniteBench and RULER. 
(This is also a suggestion, and the authors are not required to test all of the benchmarks mentioned above. However, some simple supplementary benchmarks are welcomed.)\n\n4. Lack of ablation studies. Ablations on $L_{comp}$ and $L_{near}$ are expected, as they can be used to prove which part of the retained KV cache is more important and to what extent the hyperparameters affect the overall performance. Also, there should be a comparison between RGL and other methods of assigning position information, such as assigning continuous position ids and not assigning any position information for distant tokens.\n\n5. Unclear presentation of research intention. KV cache compression is a technique developed to enhance model generation speed or reduce memory consumption. The paper should clearly state which purpose is the main focus and conduct the corresponding experiments. For example, the model generation speed should be tested and it is expected to be faster than full KV cache inference. The peak memory is also expected to be lower if the system implementation is careful enough. The speed reported in lines 445-446 does not support the research intention, as the pre-fill speed is slightly lower. Reporting a faster generation speed (measured only on the decoding stage, excluding the pre-fill stage) might be helpful.\n\n6. No discussion of limitations and future work. This paper should discuss the key limitations and potential future work of the proposed methods.\n\n\n[1] H2O: Heavy-Hitter Oracle for Efficient Generative Inference of Large Language Models\n\n[2] SnapKV: LLM knows what you are looking for before generation\n\n[3] SirLLM: Streaming Infinite Retentive LLM\n\n[4] InfLLM: Unveiling the intrinsic capacity of LLMs for understanding extremely long sequences with training-free memory\n\n[5] Minference 1.0: Accelerating pre-filling for long-context llms via dynamic sparse attention\n\n[6] L-eval: Instituting standardized evaluation for long context language models\n\n[7] RULER: What’s the Real Context Size of Your Long-Context Language Models?\n\n[8] ∞ Bench: Extending Long Context Evaluation Beyond 100K Tokens" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024intelllm,\ntitle={Intel{LLM}: Little Hints Make a Big Difference for {LLM} {KV} Cache Compression},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4QWPCTLq20},\nnote={under review}\n}" }, "abstract": { "value": "Large Language Models (LLMs) have demonstrated exceptional capabilities in integrating contextual knowledge, but their deployment is often constrained by the substantial computational resources required for long text sequences. To mitigate the inference time cost associated with attention mechanisms, LLMs utilize key-value embedding caching techniques (KV cache), which introduce significant storage pressure. In this paper, we propose IntelLLM, a novel and efficient approach to KV cache compression that strikes a balance between compression rate and performance. Drawing inspiration from sparse attention mechanisms, we observe that only a small subset of tokens in lengthy texts captures the majority of attention weights. This sparsity, intrinsic to the attention mechanism, serves as the foundation for improving the KV compression ratio through a strategic eviction method. 
IntelLLM is composed of center of gravity eviction (CGE) strategy and remote gap localization (RGL) strategy. CGE is designed to address the potential loss of important semantic dependencies when evicting high-sparsity tokens, which prioritizes the retention of key tokens by shielding the center of gravity of attention during inference, thereby preserving critical information and optimizing the efficiency of attention computation. Additionally, RGL is proposed to leverage implicit positional features to maintain long-range dependencies, inspired by advancements in location encoding research. Our KV compression approach integrates seamlessly with existing LLMs, requiring minimal code modifications without the need for fine-tuning or model parameter changes. IntelLLM not only significantly reduces the storage requirements for KV cache but also consistently outperforms full KV models in long text processing tasks, while utilizing only 50% of the typical KV cache expenses." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "LLM", "KV cache compression", "CGE", "RGL" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/e70bdc24592a789f5a144d6ea6d61377169446db.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "IntelLLM: Little Hints Make a Big Difference for LLM KV Cache Compression" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4R71pdPBZp
Self-Evolving Multi-Agent Networks for Software Development
main
Active
Software development;LLM;Multi-agent collaboration
foundation or frontier models, including LLMs
6;6;8;8
3;3;4;3
3;3;3;4
2;3;3;3
3;4;4;4
7
3.25
3.25
2.75
3.75
0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. Are there any issues with the citation format in the paper?\n2. Does the paper lack an appendix?\n3. In Table 2, there is no comparison of results with environment tools but without evolving. This should be added." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. This paper proposes a multi-agent collaboration approach to address software development problems.\n2. This paper introduces a software development benchmark called RSD-Bench, which provides more detailed and structured software requirements for documenting user needs compared to previous benchmarks.\n3. Extensive experiments demonstrate that EvoMAC outperforms other single-agent and multi-agent methods on both RSD-Bench and HumanEval." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a multi-agent collaboration approach to address software development problems. EvoMAC obtains text-based environmental feedback by verifying the match between the MAC network's output and the target proxy, and it updates the network using a novel textual backpropagation technique, thereby achieving the final development outcome. Additionally, this paper introduces a software development benchmark called RSD-Bench, which provides more detailed and structured software requirements for documenting user needs compared to previous benchmarks. The final experimental results show that the proposed EvoMAC outperforms other single-agent and multi-agent methods on both RSD-Bench and HumanEval." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The benchmark proposed in this paper lacks data analysis and some basic statistical information, such as prompt length, the number of final generated files/functions, etc.\n2. The benchmark proposed in this paper is relatively easy, with the EvoMAC method already achieving around 90% accuracy." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. How sensitive is EvoMAC to the quality and specificity of feedback from unit tests? 
If the unit tests are incomplete or overly general, would EvoMAC still produce reliable code, or would it require stricter validation criteria?\n2. Can EvoMAC work effectively with models of different sizes, or does it rely on the power of high-capacity LLMs? Would it perform satisfactorily with smaller models that might be more efficient in constrained environments?\n3. Can the self-evolving mechanism be applied to other domains outside software development? If yes, how?\n4. Given EvoMAC’s iterative approach, how would it handle larger software projects with thousands of lines of code and extensive requirements? Are there specific design considerations for scaling it to more extensive projects?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- This self-evolving paradigm allows MAC networks to adapt iteratively based on environmental feedback. The framework employs a mechanism similar to neural network backpropagation, where the output of the MAC network is verified against a target proxy, facilitating continuous learning and improvement.\n- The RSD-Bench provides a structured benchmark for software-level coding tasks, focusing on comprehensive requirements rather than isolated functions.\n- By incorporating unit tests and compilers as feedback mechanisms, EvoMAC reduces subjectivity and provides reliable feedback, which is critical for verifying the correctness of generated code. The objective environment-based feedback is an effective alternative to critique agents, which can introduce bias and hallucinations." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a novel framework called EvoMAC, aimed at enhancing the capabilities of LLM-driven multi-agent collaboration (MAC) systems in software development. The authors argue that traditional MAC systems are heavily reliant on human-designed workflows, which limits their adaptability and performance in real-world scenarios. EvoMAC seeks to overcome these limitations by enabling self-evolution of agents and their connections during task execution." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The EvoMAC framework introduces significant complexity with its multi-agent setup, dynamic adjustments, and textual backpropagation mechanism. This complexity may limit the framework's accessibility and implementation ease for real-world adoption outside of specialized research contexts.\n- Although EvoMAC performs well with large models like GPT-4o-Mini, its performance with smaller or less capable models is unclear. This reliance may restrict its applicability, particularly in environments with limited computational resources.\n- RSD-Bench focuses on website and game software types, which may not comprehensively represent the diversity of real-world software development tasks. Expanding the evaluation to include other domains, such as enterprise applications or data processing software, would enhance the generalizability of the results." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "Please refer to the questions in Weaknesses." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- Effectively demonstrates EvoMAC's evolution process as analogous to neural network training, establishing a reliable target proxy for evaluation and constructing a clear objective.\n\n- The paper is well-structured and easy to follow, with thorough explanations of EvoMAC’s self-evolving process,the design of the RSD-Bench, and detailed descriptions of experimental procedures. Figures and benchmarks illustrate the methodology effectively, aiding comprehension.\n\n- By addressing limitations in traditional MAC systems and demonstrating EvoMAC’s efficacy on challenging software-level tasks, this work sets a promising precedent for adaptive agent frameworks in automated software development, making it valuable for both research and practical applications.\n\n- The RSD-Bench is more practical in software generation evalaution, as it aligns closely with real-world software development process. By incorporating unit tests at both the task and function levels, it establishes a rigorous and precise mechanism for evaluating software generation quality. Additionally, RSD-Bench demonstrates strong human alignment (0.9922), providing a reliable evaluation metric. This paper also conducts analysis the reasonality of RSD-Bench in comparison to existing benchmarks, demonstrates its necessity. \n\n- This paper provides thorough experiments and analyses that robustly demonstrate EvoMAC’s effectiveness and performance. EvoMAC’s strong performance on both the RSD-Bench and HumanEval benchmarks highlights its high quality and efficacy in handling complex coding tasks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents EvoMAC, a self-evolving multi-agent collaboration (MAC) network designed to advance LLM-based multi-agent systems beyond function-level tasks to software-level coding tasks. EvoMAC employs a unique textual backpropagation mechanism to iteratively update agents and their connections in response to text-based environmental feedback, effectively enhancing task completion without human intervention. By formulating the evolving process to analogize neural network training, EvoMAC provides a clear structure for defining and extracting improvements. This approach underscores the significance of iterative refinement in the software generation process, enabling continuous improvement and adaptability in complex coding tasks. \n\nTo evaluate EvoMAC, the authors introduce RSD-Bench, a novel benchmark with complex and diverse software requirements that includes automated requirement verification. 
EvoMAC demonstrates superior performance on RSD-Bench and the HumanEval function-level benchmark, outperforming state-of-the-art methods and showcasing its robustness and adaptability across various evolving iterations and LLM configurations." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Existing studies, such as [1-4], have also explored automation in LLM-based multi-agent collaboration. Please compare the differences between EvoMAC and these works. \n\n- EvoMAC's updating process includes removing agents that have completed their tasks. Can the entire agentic workflow be replayed once a task is finished, or is the removed agent permanently excluded from further iterations? \n\n- Given that EvoMAC includes multiple evolutionary iterations, direct comparisons with standard multi-agent frameworks may not be entirely fair. Could you also provide the number of LLM calls for tasks in RSD-Bench? This metric would offer a clearer understanding of EvoMAC’s performance.\n\n- EvoMAC primarily focuses on models like GPT-4, Claude 3.5, and Gemini, but it is unclear if the framework can adapt to less powerful models, such as GPT-3.5 or open-source options like DeepSeek. Presenting results across a broader range of LLMs would support EvoMAC’s claims of robustness and adaptability.\n\n- Can the authors provide additional examples of the unit tests designed within RSD-Bench?\n\n- The Testing Team’s performance significantly impacts the Coding Team's potential, particularly in the HumanEval benchmark. How is the Testing Team’s performance evaluated to ensure alignment with target performance objectives and to prevent divergence? Additionally, how is the Testing Team’s performance quantified within RSD-Bench? \n\n- The paper does not specify the stopping criteria for EvoMAC’s iterative evolution process. Could the authors provide details on the stopping mechanism or criteria? \n\n- The paper lacks specific settings for the Coding Team in both the HumanEval and RSD-Bench benchmarks. Please provide these details to improve clarity on the experimental configuration and consistency across benchmarks.\n\n- Could the authors showcase additional examples of the textual gradient analysis and the updating process during the evolution for HumanEval and RSD-Bench?\n\n\n\n[1] Liu Z, Zhang Y, Li P, et al. Dynamic llm-agent network: An llm-agent collaboration framework with agent team optimization[J]. arXiv preprint arXiv:2310.02170, 2023.\n\n[2] Zhuge M, Wang W, Kirsch L, et al. GPTSwarm: Language Agents as Optimizable Graphs[C]//Forty-first International Conference on Machine Learning.\n\n[3] Hu S, Lu C, Clune J. Automated design of agentic systems[J]. arXiv preprint arXiv:2408.08435, 2024.\n\n[4] Qian C, Xie Z, Wang Y, et al. Scaling Large-Language-Model-based Multi-Agent Collaboration[J]. arXiv preprint arXiv:2406.07155, 2024." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "- When explaining their approach (section 3.1), the authors did not mention problems regarding the context window of most LLMs. Since requirements can contain quite a large amount of textual data, is the approach capable of dealing with it without extra techniques (e.g., RAG)? If not, even though the authors did not highlight it as a problem, the context window limitations should be mentioned." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is very well written and clearly explained. The two proposed contributions (the EVoMAC approach and RSD-Bench dataset) are consistent with the objective expressed in the introduction.\nAltough the problem of how to (self-)organize multi-agents is not exactly a new problem, the rise of agentic approaches using LLMs brought new traction to this challenge, and the authors addressed a pain point on these approaches when dealing with complex software development. Indeed, most solutions rely only on function-level and ignore the requirements engineering perspective, which ultimately leads the developers to half-baked solutions. This is not the case with the proposed EvoMAC. It takes account of the particularities of user requirements when organizing the agents initially and also considers the evaluation of the generated code against initial requirements.\nTheir inspiration for neural network algorithms is broad. However, it is also a clever idea that sounds original and demonstrates a creative adaptation of backpropagation principles to multi-agent collaboration.\nThe authors also provide sound experimentation on how they implement their approach and when comparing with similar solutions.\nTogether with the approach, the authors provided a well-defined benchmark to overcome the limitations of the existing ones. A more detailed description of the RSD-bench could indeed compose a contribution per se.\nThe RSD bench is tailored to requirements engineering, which could address common limitations in agent-based collaboration research by bridging functionality-based assessments with requirement-based evaluations." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors provide a paper with twofold contribution: i) a new approach to developing multi-agent collaboration systems for software development and ii) a benchmark to compare their approach with existing approaches to solving the same problem.\nRegarding (i), their approach (called EvoMAC) intends to overcome the limitations of similar multi-agent collaboration systems development workflows, mainly on adaptability and generalization. 
EvoMAC was designed to mimic standard neural network development, meaning that the errors are \"backpropagated\" throughout the agents, creating a self-adaptive multi-agent network.\nRegarding (ii), the benchmark dataset RSD-Bench was created based on the requirements of the software being developed, in contrast to the existing ones, which are usually based on the functionality of the generated code/software (i.e., unit tests).\nThe paper's results show that the EvoMAC approach outperforms other approaches when applying the RSD-Bench to adapted versions of standard benchmark datasets like HumanEval." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Most of the paper's weaknesses are minor problems whose fixes can improve its quality, even though I don't consider them mandatory.\n- I truly believe that the mathematical explanation of the problem (mainly between lines 169-184) is unnecessary, even though I see that this somehow facilitates some later explanations. Although it provides some kind of generalization, the approach still relies on LLMs that are probabilistic by nature and thus are not mathematically generalizable (i.e., the function $\\phi(\\cdot,\\cdot)$ is essentially a message sent to an LLM, so it does not behave exactly as a mathematical generic function as the authors may want to demonstrate). I advise the removal of the mathematical explanation. It can help sometimes but does not add value to the paper.\n- The paper lacks an actual example/use case as the opposite of (or a complement to, if the authors decide to keep it) the mathematical explanation. The authors use this type of explanation in lines 270-271. This kind of example can be used as a running example throughout the paper.\n- Minor problems:\n\t- Typo on the Figure 3 caption. \"indrection\"\n\t- Figures 6 and 8 can be improved to ensure legibility and accessibility, maybe by adjusting font size and contrast.\n\t- Figure 1 is a bit confusing. I think it can be split into 3 different figures or be better explained in the paper. If kept as is, I suggest adding brief explanations of the arrows, such as \"add,\" \"revise,\" and \"remove.\"\n\t- Figure 5 can be rethought with a better color choice, especially considering the accessibility of the paper." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024selfevolving,\ntitle={Self-Evolving Multi-Agent Networks for Software Development},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4R71pdPBZp},\nnote={under review}\n}" }, "abstract": { "value": "LLM-driven multi-agent collaboration (MAC) systems have demonstrated impressive capabilities in automatic software development at the function level. However, their heavy reliance on human design limits their adaptability to the diverse demands of real-world software development.\nTo address this limitation, we introduce EvoMAC, a novel self-evolving paradigm for MAC networks. 
Inspired by traditional neural network training, EvoMAC obtains text-based environmental feedback by verifying the MAC network's output against a target proxy and leverages a novel textual backpropagation to update the network.\nTo extend coding capabilities beyond function-level tasks to more challenging software-level development, we further propose RSD-Bench, a requirement-oriented software development benchmark, which features complex and diverse software requirements along with automatic evaluation of requirement correctness.\nOur experiments show that:\ni) The automatic requirement-aware evaluation in RSD-Bench closely aligns with human evaluations, validating its reliability as a software-level coding benchmark.\nii) EvoMAC outperforms previous SOTA methods on both the software-level RSD-Bench and the function-level HumanEval benchmarks, reflecting its superior coding capabilities." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Software development", "LLM", "Multi-agent collaboration" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/ad7adb5f92daccd3d18429f235d6e265635b7dd8.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/bccf047de25755d94d059c3f5f1dcc5000452fe4.pdf" }, "title": { "value": "Self-Evolving Multi-Agent Networks for Software Development" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4RHdGVimNA
StagFormer: A Staggered Transformer for Decoding Layers in Parallel
main
Active
decoder only language models;transformers;staggered execution;pipelining;parallel decoding;efficiency
foundation or frontier models, including LLMs
3;3;3;5
4;4;4;4
2;2;2;3
2;3;3;3
2;3;1;2
3.5
4
2.25
2.75
2
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- For KV-caches, the total KV caches are a little increased by the amount of one layer for cross-attention in upper layers, rights?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- StagFormer architecture is interesting, and has very good potential for both performance and throughput.\n- The idea of parameter sharing and recurrent decoding looks good." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposed a new architecture StagFormer, which stagger the time dependency between the lower and upper layers. The overall design seems a little non-intuitive, but has a lot of potential for throughput and performance. For example, parameter sharing or local cross-attention could yield better throughput." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- I like the concept and potential of this paper, but I believe that this paper is not well-organized, and looks like unfinished work yet. For example, there is missing reference in L.267 (I guess this refers to Table 3), there are a few results for proof of concept.\n- Table 3 is showing few-shot results for gray, blue, red lines in Figure 4 (correct me if I’m wrong.) I wonder why shared-weights StagFormer (blue) outperforms Baseline 2.8B params (red) in some benchmarks, even though it shows higher loss values.\n- What makes StagFormer 2.9B to outperform Baseline 2.8B params in Table 1? Is it due to cross-attention in upper layers? This looks somewhat interesting and also confusing because I thought the changed structure (using previous timestep’s intermediate activations) could degrade performance a lot.\n- How did the authors measure the decoding time in Table 2? Running separate parameters in parallel is not trivial, I believe. Is it actual time or hypothetical time by assuming parallel execution of them?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Can you describe the architecture shape (vocab size, qkv heads, embedding dimensions) and its justification? The vocab size of 256K is quite high for models of this size.\n1. 
In Lines ~499-501, the authors mention that cross-attention is linear to input length instead of quadratic with window size 1. Isn't it linear with any fixed window size? Considering that the cost of attention mainly stems from KV cache IO during decoding, I think the constant factor with a window size as small as 128 makes the cost of cross-attention negligible compared to self-attention (especially when expanding to modern context lengths of 8K or more).\n 1. However, the *increase* in performance when going from full cross-attention (1024) to windowed attention with window size 512 and 128 is strange. Can the authors justify this increase in performance?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The idea and architecture design are very novel\n1. The authors propose numerous variants which showcase the potential extension of the idea across various axes–parallel execution, weight sharing, recurrent computation.\n1. The architecture shows clear advantages over vanilla transformers across its variants\n1. The writing is easy to follow and visual depiction of the architecture and its variants are superb." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors present the Staggered Transformer (StagFormer) and its variants which relieve sequential dependancies in the decoding pipeline to enable higher levels of parallel execution.\n\nConsider a transformer with two stacks of layers, A (bottom half) and B (upper half). In vanilla transformers, the input token embedding is passed to stack A. Then, the output of stack A is passed to stack B. All layers apply self-attention on outputs of the previous layer.\n\nIn the baseline StagFormer (`Separate-Weights`), stack A is the same. However, stack B takes in the input token embedding rather than the output of stack A.\nTo supplement this, stack B applies cross-attention on the final outputs of stack A, up until the previous token. In other words, stack B cross-attends to the outputs of *all previous input tokens* from stack A, instead of directly inputting that of the *current* input token. This relieves the dependency of stack B on stack A, within a single decoding step, thus both A and B can be computed simultaneously.\n\nThe authors investigate many variants of this design:\n1. `Shared-Weights`: this is where stack A and stack B share the same model parameters (excluding the cross-attention layers which are unique to stack B).\n2. `Recurrent, Shared-Weights`: this is a unique decoding method for the `Shared-Weights` trained model. In `Shared-Weights` stack A and B are identical, except that stack B applies cross-attention to outputs from stack A. Essentially, the shared stack S (= A = B) is first forwarded without cross-attention, and then forwarded a second time *with* cross-attention, attending to outputs from the first forward pass. The `Recurrent` setting refers to that where the first forward pass is skipped, and cross-attention in the second pass attends to outputs of the \"second\" pass from the previous decoding step.\n3. 
`p > 2`: this is where more than two stacks are considered.\n\nWhen compared to vanilla transformers pretrained from scratch, StagFormers show various advantages, mainly:\n- `Shared-Weights 2x18L`: StagFormer outperforms the vanilla 18L baseline (with roughly same parameters) in both perplexity and average task performance. Using recurrent decoding (roughly matching 18L baseline computation), average task performance lies between the two. StagFormer underperforms the vanilla 36L baseline with roughly same computation in perplexity, but performs comparably on tasks.\n- `Separate-Weights 2x18L`: StagFormer outperforms the vanilla 36L baseline (with roughly same parameters and compute) in both perplexity and task performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. **Memory bottlenecks during decoding may hinder benefits of parallel execution, which is not discussed**: LM decoding is typically bottlenecked by memory rather than compute (see references below). When batch size x context length is small, memory access is dominated by model parameter access. Otherwise, memory access is dominated by KV cache access. While StagFormer can conceptually *parallelize* execution of layers, the associated memory access load cannot be parallelized. In fact, the cross-attention layer will add additional KV cache access overhead. These are critical to assessing the actual wallclock time benefits of decoding with StagFormers compared to vanilla transformers, but is not discussed.\n 1. Different variants of StagFormers will have different memory bottlenecks. Examples:\n 1. All variants: cross-attention is added in half of layers. Therefore, the overall KV cache access overhead will increase by 50% (relative to that of self-attention, used in all layers). This will have a larger effect on decoding time as batch size x sequence length becomes large.\n 1. `Separate-Weights`: both stacks can be executed in parallel, but the memory load is identical as the parameters of both stacks must be retrieved from memory. This means that wall-clock time should typically be identical to vanilla transformers, as decoding is bottlenecked by memory access. `Shared-Weights` can solve this issue.\n 1. **It is unclear which StagFormer variant is used in Table 2, raising questions on the performance vs latency comparison**: While Table 2 states that a \"comparable quality StagFormer\" is 33% faster than baseline transformer during decoding, the exact variant is unclear. Given the reasons above, it seems likely that this is the `Shared-Weights 2x18L` variant. While its average task performance is comparable to baseline 36L, its PPL is in the middle of that between vanilla 18L and 36L. It would be misleading to describe this variant as \"comparable quality\" to vanilla 36L.\n 1. **Missing comparison of performance vs latency across model variants**: Expanding on the point above, a comparison of prefill/decode time across model variants will provide a clear picture on the performance vs latency benefits of each model variant. This could take the form of a single table that lists the PPL, task performance, and prefill/decode time for each model. In the case of `p > 2, Shared-Weight` variants, I believe this may actually reveal some advantages in terms of latency.\n 1. 
**The additional KV cache overhead of cross attention may slow down decoding for longer contexts**: Since KV cache overhead is quadratic to context length, the decode time advantages as shown in Table 2 will likely diminish with longer contexts, especially in batch decoding. Given the relatively short context length of 1024 tokens considered in this study, compared to modern LLMs with 8K+ context, measurement on longer contexts and larger batch sizes can help gauge the potential of the architecture.\n1. **Misleading task performance of `Recurrent` variant**: In Table 3 (for example), the performance of various tasks are identical between the `Shared-Weights 18L` model and its `Recurrent` counterpart. This is likely because the tasks are measured in a teacher-forcing setting, where the outputs of the prefill stage are used for evaluation. This does not represent the task performance of the `Recurrent` setting, as recurrence is only applied to decoding, as explained in Section 3.2.\n1. **Experimental results on model variants are hard to follow**: The organization of the results section could be improved to make the comparison between different model variants more clear.\n 1. Within tables, variations could be better indicated with separate columns, task names could be shortened for space, latency metrics could be included, etc.\n 1. Results on different variants are presented in multiple tables without a clear organization.\n1. **Incomplete writing**: \"(TODO)\" in Line 385, the reference error \"??\" in Line 267, and numerous typos suggest that this is an incomplete manuscript that is not ready for review.\n\nReferences on memory bottlenecks during inference\n- [Efficiently Scaling Transformer Inference](https://arxiv.org/abs/2211.05102)\n- [LLM Inference Unveiled: Survey and Roofline Model Insights](https://arxiv.org/abs/2402.16363v4)\n- [Taming Throughput-Latency Tradeoff in LLM Inference with Sarathi-Serve](https://arxiv.org/abs/2403.02310)\n- [Block Transformer: Global-to-Local Language Modeling for Fast Inference](https://arxiv.org/abs/2406.02657)" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. How does varying the depth of individual stacks in StagFormer affect the trade-off between decoding speed and model quality?\n2. What factors determine the optimal number of stacks for a given application, balancing computational efficiency and performance?\n3. Could the staggering concept be extended to encoder-decoder Transformers, like those used in machine translation?\n4. How well could StagFormer be combined with other techniques, like quantization or knowledge distillation, to further enhance decoding efficiency?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. 
StagFormer introduces a unique method to break the sequential dependency of layers in Transformers, enabling parallel execution.\n2. Experiments demonstrate significant latency reduction while maintaining or even exceeding the quality of a standard Transformer.\n3. The paper investigates different StagFormer variants, offering flexibility and adaptability to various scenarios and resource constraints.\n4. The paper effectively explains the StagFormer concept and its variants, supported by clear diagrams and algorithms." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a novel Transformer architecture called StagFormer designed to improve the efficiency of decoding in Transformer-based language models by enabling the parallel execution of layers along the depth axis." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Limited exploration of p > 2. While the paper explores StagFormer with more than two stacks, it acknowledges performance degradation and the need for further research in this area.\n2. The paper mentions the communication cost associated with parallel execution but doesn't offer concrete solutions to mitigate it.\n3. While the Pile dataset is comprehensive, evaluating on additional datasets would strengthen the generalizability of the findings.\n4. Comparing StagFormer with other methods for efficient Transformer inference, such as speculative decoding, would provide a more comprehensive perspective." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Addressing the weaknesses outlined above would improve the paper." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- staggered computation leads to significant improvements in per-time-step decoding speeds while slightly improving performance\n- provides results and analysis of different variants of staggered transformers that further explores the architecture's efficacy" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a novel transformer architecture that effectively reduces the number of sequential steps (layers) during the decoding process by staggering the computation across different time-steps. This allows for improved parallelism during decoding individual sequences, providing speedups during inference." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Biggest critique is that it lacks comparative analysis of staggering computation vs. 
simply increasing the width of the model and lowering the number of layers, as this increases per-layer parallelism while decreasing the number of layers, leading to a similar improvement in decoding speed.\n- This technique is possibly only useful for speeding up decoding when only a single sequence is being decoded. A non-staggered model could in theory process twice the batch size as it has half the parallelism (and hence half the per-layer memory requirement) of a model staggered with p=2. \n- StagFormer is possibly slower to train (as inferred by its slower pre-filling speed).\n- Paper could be further refined (minor critique): \n - Some references are configured incorrectly (Table ?? on page 5, \"TODO\" on page 8)\n - Plots have unnecessary information (Figure 4 doesn't need texts like /1/summarize/train)" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024stagformer,\ntitle={StagFormer: A Staggered Transformer for Decoding Layers in Parallel},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4RHdGVimNA},\nnote={under review}\n}" }, "abstract": { "value": "Standard decoding in a Transformer-based language model is inherently sequential as we wait for a token’s embedding to pass through all the layers in the network before starting the generation of the next token. In this work, we propose a new architecture, StagFormer (Staggered Transformer), which staggers execution along the time axis and thereby enables parallelizing the decoding process along the depth of the model. We achieve this by breaking the dependency of the token representation at time step $i$ in layer $l$ upon the representations of tokens until time step $i$ from layer $l−1$. Instead, we stagger the execution and only allow a dependency on token representations until time step $i−1$. The later sections of the Transformer still get access to the ”rich” representations from the prior section but only from those token positions which are one time step behind. StagFormer allows for different sections of the model to be executed in parallel, yielding up to 33% speedup in decoding while being quality neutral. We also explore many natural variants of this idea. We present how weight-sharing across the different sections being staggered can be more practical in settings with limited memory. We show how one can approximate a recurrent model during inference using such weight-sharing. We explore the efficacy of using a bounded window attention to pass information from one section to another, which helps drive further latency gains for some applications. We also demonstrate the scalability of the staggering idea over more than 2 sections of the Transformer." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "decoder only language models", "transformers", "staggered execution", "pipelining", "parallel decoding", "efficiency" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/60ecfc1e9a442a4b3208ef478624fffea1214859.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "StagFormer: A Staggered Transformer for Decoding Layers in Parallel" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4RRmy9iw3c
AutoAL: Automated Active Learning with Differentiable Query Strategy Search
main
Active
Active Learning;Differentiable Bi-level Optimization
unsupervised, self-supervised, semi-supervised, and supervised representation learning
3;3;6;6
1;3;4;5
2;2;3;3
2;2;3;2
2;2;3;3
4.5
3.25
2.5
2.25
2.5
0.845154
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See weaknesses" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The work handles an important ML problem; Active Learning (AL) with differentiable strategy search.\n- The proposed method is technically sound\n- Writing is clear and easy-to-follow" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work introduces AutoAL, a differentiable active learning (AL) strategy search method that builds on existing AL sampling strategies. AutoAL contains two neural networks, SearchNet and FitNet, which are co-optimized through a differentiable bi-level optimization framework to identify optimal AL strategies for different tasks. Experimental results show that AutoAL outperforms individual AL algorithms and other selective approaches." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Hybrid AL methods that combine uncertainty and diversity have been demonstrated to perform effectively in a variety of situations. It would be beneficial to include examples where the proposed AutoAL approach is particularly necessary or advantageous for specific applications.\n- The bi-level optimization within AutoAL relies on labeled data. How does the algorithm perform if the labeled data is skewed or imbalanced? For instance, if the initial labeled set suffers from class imbalance, might this severely impair the algorithm? The assumption of a randomly selected initial set, as used in the current experiments, appears to be less practical.\n- Similarly, is there a guarantee that the AutoAL approach, trained with labeled data from the current AL round, will identify the most informative samples from the unlabeled pool in the subsequent AL round? A more detailed analysis of the algorithm's guarantees is necessary.\n- The approach of training an additional network for sample selection shows similarities to [1] employing meta-learning with an additional network for querying.\n- Can the proposed method be applied to the open-set AL problem [1]?\n- The datasets used in the experiments are of small scale. It is imperative to validate the performance on large-scale datasets, such as ImageNet.\n\n---\n[1] Meta-Query-Net: Resolving Purity-Informativeness Dilemma in Open-set Active Learning, NeurIPS, 2022" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "NA" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- How does AutoAL perform in terms of computational efficiency compared to traditional methods on large-scale datasets?\n- What mechanisms ensure robustness against convergence issues and local minima in the bi-level optimization?\n- Can AutoAL be extended to generate new active learning strategies dynamically rather than relying solely on a predefined candidate pool?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- A new approach that automates active learning strategy selection through differentiable optimization, surpassing manual and non-differentiable methods.\n- Effective integration of strategy selection and data modeling via the bi-level optimization of SearchNet and FitNet.\n- Flexibility and adaptability, allowing incorporation of multiple existing strategies and tailoring to specific tasks and data distributions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents AutoAL, a framework for automated active learning that optimizes query strategy selection using differentiable methods. Traditional active learning approaches often rely on predefined strategies like uncertainty sampling or diversity sampling, which may not perform optimally across different datasets or tasks. AutoAL addresses this limitation by integrating existing active learning strategies into a unified framework. It employs two neural networks, SearchNet and FitNet, within a bi-level optimization structure to automate the selection process. By relaxing the discrete search space of active learning strategies into a continuous domain, AutoAL enables gradient-based optimization, enhancing computational efficiency and adaptability. Experimental results demonstrate that AutoAL consistently outperforms individual strategies and other selective methods across various natural and medical image datasets, highlighting its effectiveness and versatility." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Increased complexity and computational overhead due to the additional neural networks and bi-level optimization, potentially challenging scalability on large datasets.\n- Dependence on a predefined pool of candidate strategies, which may limit performance if optimal strategies are not included.\n- Lack of in-depth theoretical analysis explaining the method's effectiveness and the conditions under which it performs best, possibly affecting generalizability." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 1 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "see above" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The studied strategy selection problem for active learning is important.\n2. The bi-level optimization strategy is rational." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes an active strategy query algorithm where the optimal query strategy is selected by a bi-level optimization network. In particular, the authors aggregates the query strategies by a scoring function implemented as a Gaussian Mixture Model. Then, the authors split out a validation set from the labeled samples to guide scoring function calculation. Experimental results show that the proposed method supasses the baselines." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. There is still room for improvement in the paper writing.\n\n 1.1. It's unnecessary to name the two networks \"fitnet\" and \"searchnet,\" as it seems intended to make people think this is a significant innovation. However, in meta-learning, this kind of separated network design and bi-level optimization paradigm is very common.\n\n 2.1. The notations are somewhat confusing. For example, the authors didn't clearly define the output of the search net in Section 3.2, making it hard to understand Sec 3.2. It wasn’t until I finished reading the method section that I realized the output is actually a sample-wise score, forming an aggregation of scores for different queries.\n\n2. The novelty of this paper is relatively limited. The proposed meta-learning/bi-level optimization has been applied to AL [1,2]. Also, I think the algorithm design is too complicated.\n\n3. The motivation for modeling the scores by GMM distributions is unclear. Why is the score function of each strategy distributed as a Gaussian Distribution? Why is the final score function a linear weighted aggregation of different strategies? The authors should provide a concrete application or example.\n\n4. The comparison methods are too outdated, with the latest ones being LPL and BADGE from 2019. Additionally, the datasets are quite limited; despite the complexity of the method design, only CIFAR and MNIST datasets were used. Validation should be conducted on the ImageNet dataset (at least Image100). Otherwise, given that the algorithm design is much more complex than the baselines, its effectiveness cannot be convincingly demonstrated.\n\n[1] Kunkun Pang, Mingzhi Dong, Yang Wu, and Timothy Hospedales. 2018. Meta-learning transferable active learning policies by deep reinforcement learning. International Workshop on Automatic Machine Learning (ICML AutoML 2018).\n\n[2] https://grlplus.github.io/papers/96.pdf" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "1. My only concern is the efficiency of the AutoAL algorithm. 
Although more efficient solutions have been proposed to solve second-order optimization problems, I cannot find any relevant experiments to verify them." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See the Weaknesses." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The problem studied in this paper is valuable. This paper presents the first differentiable AL strategy search method. \n2. The proposed AutoAL approach is interesting and easily followed.\n3. The paper is well organized. \n4. The experiments validate the effectiveness of the proposed approach, and the ablation study in Figure 5 is insightful." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper attempts to tackle the \"generalization problem\" of active learning (AL) algorithms across data scenarios. I believe this is a core issue in the current active learning field. This paper proposes AutoAL, a differentiable AL strategy search method to select the most effective AL sampling strategies in each iteration. It consists of two neural nets, named SearchNet and FitNet, which are optimized concurrently under a differentiable bi-level optimization framework. The experiments on multiple datasets validate the effectiveness of the proposed approach." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. My only concern is the efficiency of the AutoAL algorithm. Although more efficient solutions have been proposed to solve second-order optimization problems, I cannot find any relevant experiments to verify them." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "To our knowledge, we present the first automatic AL query strategy search method that can be trained in a differientiable way." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024autoal,\ntitle={Auto{AL}: Automated Active Learning with Differentiable Query Strategy Search},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4RRmy9iw3c},\nnote={under review}\n}" }, "abstract": { "value": "As deep learning continues to evolve, the need for data efficiency becomes increasingly important. Considering labeling large datasets is both time-consuming and expensive, active learning (AL) provides a promising solution to this challenge by iteratively selecting the most informative subsets of examples to train deep neural networks, thereby reducing the labeling cost. However, the effectiveness of different AL algorithms can vary significantly across data scenarios, and determining which AL algorithm best fits a given task remains a challenging problem. This work presents the first differentiable AL strategy search method, named AutoAL, which is designed on top of existing AL sampling strategies. AutoAL consists of two neural nets, named SearchNet and FitNet, which are optimized concurrently under a differentiable bi-level optimization framework. 
For any given task, SearchNet and FitNet are iteratively co-optimized using the labeled data, learning how well a set of candidate AL algorithms perform on that task. With the optimal AL strategies identified, SearchNet selects a small subset from the unlabeled pool for querying their annotations, enabling efficient training of the task model. Experimental results demonstrate that AutoAL consistently achieves superior accuracy compared to all candidate AL algorithms and other selective AL approaches, showcasing its potential for adapting and integrating multiple existing AL methods across diverse tasks and domains." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Active Learning", "Differentiable Bi-level Optimization" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/8cdd3d608b6f90a698fca18f279a8639dc3683d3.pdf" }, "presentation": null, "primary_area": { "value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "AutoAL: Automated Active Learning with Differentiable Query Strategy Search" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4S2L519nIX
Pushing the Limits of All-Atom Geometric Graph Neural Networks: Pre-Training, Scaling, and Zero-Shot Transfer
main
Active
Geometric Graph Neural Networks;Self-supervised Pre-training;Scaling;Zero-shot Transfer;Molecular Representation
applications to physical sciences (physics, chemistry, biology, etc.)
3;5;5;8
3;3;4;3
2;3;2;3
1;2;2;3
1;3;2;4
5.25
3.25
2.5
2
2.5
-0.080845
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Many of the evaluations are on datasets for specific molecules, which are very useful for understanding specific model behavior, but should be complemented by more general evaluations, especially in the context of examining scaling behavior. Can the authors comment on or provide additional evidence that these specific, bespoke evaluations are connected to more general relationships between pre-training setups and downstream evaluations?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper continues an important line of work in exploring the utility of pre-training and scaling GNNs for molecular learning tasks. Unlike other dominant areas of deep learning, all-atom molecular representation learning relies on GNNs and does not directly benefit from advances in scaling sequence-based models.\nThe authors explore model size, aspect ratio, nearest neighbor cutoff radius, and architecture to provide a comprehensive look into the scaling behavior of molecular GNNs." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper the authors extend previous work on scaling laws for all-atom graph neural networks applied to self-supervised and supervised training. They investigate aspects of pre-training task choice, different downstream evaluations, GNN model size and aspect ratio, as well as the radial cutoff for constructing nearest neighbor graphs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Like most molecular GNN works, the authors are limited by the available evaluations (e.g., QM9). xxMD is an interesting evaluation, but these datasets are limited to a small set of specific molecules. QM9 with B3LYP in particular is not informative, and the authors might consider newer benchmarks like POLARIS or a subset of the Therapeutic Data Commons to strengthen their evaluations.\nThe paper does not offer a clear and concise summary of recommendations based on the empirical findings, which is essential to achieving the stated aim of inspiring practitioners to rethink GNN training." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. 
The paper claims to demonstrate zero-shot transfer learning using pre-trained Geom-GNNs. However, the described method seems closer to downstream task fine-tuning, given that a separate network is trained with the VAMP score objective. Could the authors clarify how this approach qualifies as zero-shot transfer rather than fine-tuning?\n\n2. Limited Pre-Training Approaches:\nThe pre-training in this work is restricted to coordinate denoising, whereas prior work has successfully used a combination of node mask prediction and coordinate denoising for improved performance. Specifically, how might adding a node mask objective influence the molecular kinetics tasks? Would it enhance the model’s ability to generalize across different molecular conformations?\nAdditionally, could the authors hypothesize the potential impact of such an extended pre-training approach on scaling behavior? \n\n3. While the effectiveness of ET and ViSNet is demonstrated on several tasks, the study lacks comparisons with invariant feature-based networks (e.g., SchNet, DimeNet, GemNet) and tensor product-based networks (e.g., Equiformer, MACE). Could the authors provide insights into how their method might perform relative to these alternative architectures?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. Demonstrates substantial performance improvements in kinetic modeling on VAMP score metrics by utilizing denoising pretraining techniques.\n2. Applies the self-supervised pretraining approach to a variety of downstream tasks, successfully proving its effectiveness across applications.\n3. Examines scaling laws in both standard equivariant models and pre-trained equivariant models, finding that even pre-trained models diverge from typical neural scaling laws due to issues like under-reaching and over-smoothing. The author suggests that, while scaling models has its benefits for supervised and unsupervised tasks, it may be more effective to focus on addressing data label uncertainty and using active token mixing to mitigate information bottlenecks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper investigates denoising pretraining and potential scaling laws for geometric graph neural networks (GNNs) on supervised tasks. These GNNs are pre-trained on noisy coordinate data to learn how to denoise and reconstruct the original coordinates. The effectiveness of this approach is tested on various downstream applications, including molecular kinetics, fold classification, and energy and force prediction. Additionally, the paper examines the scaling behavior of these models and highlights specific limitations in supervised prediction tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The author claims to be the first to demonstrate the pre-trained Geom-GNNs’ capability as zero-shot transfer learners. However, I would consider this more appropriately described as downstream task fine-tuning. In zero-shot learning, a model trained on one data source is directly applied to an unseen data class without additional training (e.g., training on English and French, then testing on Chinese without further adjustments). In contrast, for molecular kinetics the paper trains a separate network with the VAMP score objective.\n2.
The pretraining methods in this work are limited to coordinate denoising. Other approaches [1,2] that leverage both node mask prediction and coordinate denoising have already proven effective.\n3. Although the paper demonstrates the effectiveness of ET and VisNet on several tasks, it does not include evaluations on invariant feature-based networks (such as SchNet, DimeNet, or GemNet) or tensor product-based networks like Equiformer and MAC.\n\n[1] Cui, Taoyong, et al. \"Geometry-enhanced pretraining on interatomic potentials.\" Nature Machine Intelligence 6.4 (2024): 428-436.\n[2] Zhou, Gengmo, et al. \"Uni-mol: A universal 3d molecular representation learning framework.\" (2023)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "as Weaknesses." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. **Innovation**: The study presents a novel perspective, exploring an area that remains under-researched (to my knowledge), offering significant insights for the development of Geom-GNNs.\n2. **Clarity and Structure**: The article is well-organized, with clear presentation and summary of experiments and viewpoints, facilitating reader comprehension.\n3. **Robust Experimentation**: The experimental design is thorough, effectively supporting the authors’ conclusions.\n4. **Exploration of Zero-Shot Transfer Capability**: The investigation into the zero-shot transfer ability of Geom-GNNs is intriguing, with experiments indicating their potential as excellent zero-shot transfer learners.\n5. **Pre-training Insights**: Through extensive denoising pre-training tasks, valuable experiences have been gained regarding the pre-training of Geom-GNNs, including aspects such as model width, depth, aspect ratio, and the cutoff radius in geometric atomic graph construction, providing rich guidance for pre-training.\n6. **Advancement of Unified Architecture**: Given the widespread attention and efforts in the research of all-atom Geom-GNNs, this study effectively inspires researchers to reconsider the design of Geom-GNN architectures and the adjustment of training strategies, thereby promoting the development of a unified Geom-GNN architecture." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work presents a novel investigation into whether pre-trained Geom-GNNs (Graph Neural Networks for Conformational Molecules) possess efficient and transferable geometric representation capabilities, particularly in addressing the low generalization of models typically trained on specific tasks. The authors also aim to introduce “Neural Scaling Laws” to summarize the performance behavior of these pre-trained Geom-GNNs. 
However, it is unfortunate that the experimental results indicate that Geom-GNNs do not adhere to power-law scaling laws and fail to demonstrate predictable scaling behavior across various supervised tasks. Furthermore, the findings reveal that the all-atom embedding graph representations derived from Geom-GNNs exhibit excellent expressive capabilities, suggesting that Geom-GNNs can function effectively as zero-shot transfer learners." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. In Figure 6, which explores different model widths, even though the x-axis represents the total number of parameters (with model depth held constant), it would be more beneficial to indicate the model width for each point in the legend to enhance result presentation. Similarly, in Figures 4 and 7, which demonstrate the impact of model depth, using the legend to specify the exact number of layers might be more effective. In general, clear legends are always advantageous.\n2. The comparison between models trained from scratch and those fine-tuned in Section 6.1 could be more comprehensive if extended to include model depth. Previous discussions (albeit in the context of pre-training) have presented certain viewpoints, and it is anticipated that these would also have significant effects during fine-tuning." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- Why is the VAMP task considered zero-shot?\n- Appendix D: Why is there a difference between datasets for pretraining and downstream? According to Table 2, there also seem to be pretraining experiments on QM9. Are the models in Table 3 not fine-tuned?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "relevant research topics in the paper:\n- pretraining of (geometric) GNNs\n- scaling behavior of GNNs\n- oversmoothing, underreaching\n\nexperiments:\n- experiments considered not only random splitting, but also scaffold and temporal splits" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In their work the authors empirically studied properties of pre-trained geometric GNNs (i.e., GNNs with coordinates attached).\nThey seem to focus in particular on pretraining via denoising as introduced by Zaidi et al.
(2022).\nThe authors consider several downstream tasks: molecular dynamics (kinetics modeling), fold classification, prediction of quantum-chemical properties, and molecule conformations.\nThey study properties such as power-law scaling and find that the geometric GNNs do not follow it on the pre-training task.\nThe authors conclude that geometric GNNs \"saturate early and are bounded by issues such as under-reaching and over-smoothing\" and further that \"other aspects become more crucial, such as addressing data label uncertainty and employing active token mixing to mitigate the information bottleneck.\"" }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "In general, the largest problem in my opinion is that the authors fail to specify a clear research goal. Methodologically, there does not seem to be much novelty.\nBut if it is primarily an empirical research paper, it seems very important to me that the research goal is clearly defined and that, based on it, the authors explain why they select the problems they look at and the methods they compare to. If a new benchmark dataset is used (e.g., scaffold QM9), then empirical evaluations/comparisons should take into account at least some previously suggested methods (ideally complementary to each other, to better see the potential of each method on the new benchmark and to be able to compare to what the authors say).\n\\\n\\\nDetails:\n\\\n\\\n**hard to see the novelty in the paper**:\\\nThe fact that pretraining is useful was already found, e.g., by Zaidi et al. (2022).\\\nNo new methodology seems to be suggested (or is \"token mixing\" the new method?).\\\nThe study of scaling behavior might be interesting and to a certain degree novel for geometric GNNs, but no follow-up investigations seem to be employed to draw formal connections to GNN properties like oversmoothing, underreaching, etc.\n\\\n\\\n**paper is structured in a strange way, which makes it hard to follow**:\\\nIt is actually hard to understand what the research aim of the authors was.\\\nChapters 4, 5, and 6 actually seem to be about empirical experiment results.\\\nHowever, the setups are partly already explained in Chapter 3.\\\nChapters 4 and 6 show performance on problems,\\\nwhile Chapter 5 studies power-law behavior and other ablations.\\\nIn sum, the paper gets hard to follow. Better reasoning is needed for why some experiments are applied at all (with respect to the general research goal) and why they are done as they are (e.g., why particular methods are selected for comparison or why a particular dataset is used).\n\\\n\\\n**experiments**:\\\nAlthough there are some good points as mentioned in the strengths (such as the splits), it is hard to understand how large the impact of pretraining really is, as the authors only test very few methods once with pretraining and once without pretraining.\\\n\\\nAlso, there is no good argument in the paper for why the authors compare exactly to the methods they selected, \ne.g.:\\\nfor molecular dynamics, why don't they compare to Timewarp?\\\nfor QM9 and xxMD they could e.g. compare to \"E(n) Equivariant Graph Neural Networks\", \"SE(3)-Transformers\", DimeNet++, MACE, etc.\\\nAn option to get a better impression of the significance of the authors' results would be to additionally compare to standard QM9, etc.
(where there are also a lot of method comparisons out).\n\\\n\\\n**minor points**:\\\n\"token mixing\" not defined, but heavily used\\\ngrammatical errors/typos: \"In silico molecular computation and simulation are indispensable tool in...\"" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "This paper explores using pre-trained geometric graph neural networks as effective zero-shot transfer learners for molecular conformation representation in out-of-distribution scenarios, and their scaling behavior under various setups." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024pushing,\ntitle={Pushing the Limits of All-Atom Geometric Graph Neural Networks: Pre-Training, Scaling, and Zero-Shot Transfer},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4S2L519nIX},\nnote={under review}\n}" }, "abstract": { "value": "Constructing transferable descriptors for conformation representation of molecular and biological systems has been a long-standing challenge in drug discovery, learning-based molecular dynamics, and protein mechanism analysis. Geometric graph neural networks (Geom-GNNs) with all-atom information have transformed atomistic simulations by serving as a general learnable geometric descriptors for downstream tasks such as interatomic potential and molecular property prediction. However, common practices involve supervising Geom-GNNs on specific downstream tasks, which suffer from the lack of high-quality data and inaccurate labels, potentially leading to poor generalization and performance degradation on out-of-distribution (OOD) scenarios, especially with quantum chemical data. In this work, we explored the possibility of using pre-trained Geom-GNNs as transferable and highly effective geometric descriptors for improved generalization. To explore their representation power, we studied the scaling behaviors of Geom-GNNs under self-supervised pre-training, supervised and unsupervised learning setups. We found the expressive power of different architectures can differ on the pre-training task. Interestingly, Geom-GNNs do not follow the power-law scaling on the pre-training task, and universally lack predictable scaling behavior on the supervised tasks with quantum chemical labels. More importantly, we demonstrate how all-atom graph embedding can be organically combined with other neural architectures to enhance the expressive power. Meanwhile, the low-dimensional projection of the latent space shows excellent agreement with conventional geometrical descriptors." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Geometric Graph Neural Networks", "Self-supervised Pre-training", "Scaling", "Zero-shot Transfer", "Molecular Representation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/80c45a708014eea392ed8db0e423c825ba02b4d0.pdf" }, "presentation": null, "primary_area": { "value": "applications to physical sciences (physics, chemistry, biology, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Pushing the Limits of All-Atom Geometric Graph Neural Networks: Pre-Training, Scaling, and Zero-Shot Transfer" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4S9bBbX1be
DriveArena: A Closed-loop Generative Simulation Platform for Autonomous Driving
main
Active
Autonomous Driving;Diffusion Model;Closed-loop Simulation
applications to robotics, autonomy, planning
3;6;6;6
4;4;3;5
2;3;3;3
2;3;3;3
2;3;3;3
5.25
4
2.75
2.75
2.75
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Can authors provide visualizations of generating consistent scenes based on slightly different conditions (e.g., the ego car moves differently), which is a key aspect for closed-loop evaluation?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. This submission targets an important problem in the field of autonomous driving: how to properly evaluate the performance of end-to-end systems. The submission introduces a closed-loop evaluation method, which is more reflective of real-world driving conditions compared to open-loop evaluations. It will be useful for practical applications.\n2. The platform utilizes road networks from cities worldwide and allows for the generation of diverse traffic scenarios with varying styles, which is essential for training and evaluating driving agents across different driving environments. \n3. The submission provides a clear and detailed explanation of the technical aspects of DriveArena. The figures, tables, and appendices enhance the understanding of the system's components and their interactions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The submission introduces DriveArena, a high-fidelity closed-loop simulation platform designed for testing and developing autonomous driving agents in real-world scenarios. The platform consists of two main components: the Traffic Manager and the World Dreamer. The Traffic Manager is responsible for generating realistic traffic flow on any global street map, while the World Dreamer is a high-fidelity conditional generative model that creates infinite autoregressive simulations. DRIVEARENA enables the generation of diverse traffic scenarios with varying styles and allows driving agents that can process real-world images to navigate within its simulated environment." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The submission lacks novelty. The proposed DriveArena is just a combination of several existing methods: LimSim for traffic simulation and condition generation; DriveDreamer/Vista for generating images from the conditions; NAVSIM and Carla for closed-loop evaluation. \n2. The World Dreamer model is trained primarily on the nuScenes dataset, which may not capture diverse driving scenarios. To improve the model's generalizability, it would be beneficial to incorporate additional datasets that represent different geographical locations, driving cultures, and road conditions.\n3. This submission fails to address an important issue for closed-loop evaluation: the model should be able to generate the same scene captured from different positions (similar to actual scenarios of driving differently in the same scene). 
No visualization was found addressing such an issue. \n4. The experiments are insufficient. The submission primarily focuses on the simulation platform itself rather than an in-depth evaluation of various driving agents within the platform. Expanding the experimental section to include a broader range of driving agents and more extensive testing can help provide a clearer picture of DRIVEARENA's capabilities and limitations.\n5. Minor issues in writing and presentation. For example, the figures are not vectorized for zooming in and should be replaced with vector versions." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "In Lines 261-263 it is written \"We also verify that extending to a multi-frame auto-regressive version (using multiple past frames as reference and outputting multi-frame images) and adding additional temporal modules can enhance temporal consistency.\" - How does this work in practice? In closed-loop, the ego-vehicle can drive however it wants and so there can still be inconsistencies between generated videos at time t and t+1, right? Further, how are the generated videos used? Are T frames predicted but only the 1st one is shown to the ego-policy? And then a new prediction is made (similar to model-predictive control)?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The topic of generative simulation is an especially timely one for the AV field, with better and better generative video models coming out regularly and frameworks such as this one being able to capitalize on these parallel investments.\n\nWhile all of the individual pieces of this framework have existed before, connecting them all together into a usable simulation framework for the broader community to use is appreciated and presents a potential path towards practical generative simulation.\n\nThe paper is written well and it is easy to follow the core ideas as presented.\n\nLeveraging layout-conditional generation is a sound and sensible idea for maintaining consistency across time." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work presents a generative simulation framework for autonomous driving. In particular, a layout-conditional diffusion model is proposed as a sensor simulator, with bounding boxes and road graphs serving as the underlying world state." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The core weakness is that geometric and semantic consistency are not guaranteed. Layout conditioning certainly helps, but in the linked webpage's videos there are clear inconsistencies across timesteps (e.g., car color and types changing over time).
This is something that is not brought up in Lines 190 - 198, but perhaps should be as it is the core reason why works leverage NeRF or 3D Gaussian Splatting (for their geometric/semantic/temporal consistency over purely-2D generative models).\n\nWhile static images of generations appear to be of good quality, there are significant temporal consistency issues when viewed as part of a video on the linked project page (most videos appear to be static even with the ego-vehicle theoretically moving forward in the world). Do the authors have any idea for why that is? It almost appears that the video diffusion model suffers from mode collapse when tasked with generating building walls (taking example from the linked webpage).\n\nThe AV results in Tables 1 and 2 still show a significant gap to real data, indicating that, while the core points of DriveArena are sensible, there is still much work to be done to leverage it for practical AV development." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "* Evaluation of sim-to-real gap: How are the videos generated? Is this in open- or closed-loop? For correctness, it should be closed-loop. If so, do you notice any suffering from DAgger issues?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* Highly relevant research direction: The work correctly argues for the need of closed-loop evaluation of autonomous driving behavior models.\n* Novelty: High-fidelity closed-loop image-based simulation with clear controllability (e.g. via text prompts).\n* Performance: Evaluation of sim-to-real gap shows superiority over MagicDrive and reasonable results for open-loop and closed-loop behavior eval.\n* Well presented: The paper is easy to follow and all information is well presented." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work presents DriveArena, an image-based high-fidelity simulator for closed-loop simulation of agents for autonomous driving. DriveArena consists of a Traffic Manager and a World Dreamer, and its modular design allows for easy replacement of each of the components. Traffic Manager enables the dynamic control of all traffic agents and supports various HD maps, both of which are inputs to World Dreamer, which uses conditional diffusion to generate realistic images. The diffusion model is conditioned on map and object layout and generates images autoregressively for temporal consistency and variable length simulations.\n\nDriveArena is evaluated in two ways. First, its fidelity is evaluated, among others, with UniAD's performance. Then, open- and closed-loop evaluation of VAD and UniAD models is performed." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* Evaluation of sim-to-real gap: The presented evaluation is pretty short and for example lower L2 distances do not necessarily imply higher quality images. Additional evaluation of the fidelity would be helpful. Are there perception metrics such as FID that could be used or other metrics that compare statistics between ground truth and generated images? Otherwise, user studies are another possibility to judge the quality.\n* Unclear takeaway from VAD vs. UniAD open- and closed-loop comparison: In open-loop, UniAD performs better on nuScenes but worse on DriveArena than VAD. This difference is explained with better open-loop generalization of VAD. However, it's unclear what role the fidelity of DriveArena plays. Is it possible to e.g. run an experiment with different DriveArena datasets, some that are closer and some that are further from nuScenes? In closed-loop eval, UniAD outperforms VAD in DriveArena. It's unclear whether these differences are due to open- / closed-loop model gaps or issues in DriveArena. I acknowledge that this difficulty of correct attribution is inherent to research in this area but you might be able to shed more light on this. For example, would it be possible to evaluate the models on various levels of fidelity in DriveArena to disentangle open- / closed-loop eval from it?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "+ About constructing traffic scenarios from OSM.\n + How is the HD map built from the OSM data, and how is traffic demand generated in this kind of scenario? \n + OSM maps are not high quality in many areas, is there a way to solve this?\n + Is this part of the work mainly based on the tools provided by SUMO or LimSim?\n+ Are there any indicators to evaluate the generated results directly, like FID or FVD?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Compared to previous work using video generation methods as world models to achieve realistic traffic simulation, this work uses a two-step pipeline, including rule-based traffic simulation in geometric space and diffusion-based video generation conditioned on trajectory layouts of vehicles. I believe this approach can achieve better physical realism and temporal consistency.\n\nText prompts are introduced to achieve diverse driving scenarios and plenty of demos are presented to clearly show the generation results.\n\nThe codes are well organized with a modularized design and the whole platform is open-source to better support downstream tasks in research of autonomous driving." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a traffic simulation platform for testing autonomous driving algorithms. The simulation architecture is built based on LimSim, using Monte Carlo tree search for vehicle motion planning. A diffusion-based renderer is applied to achieve realistic and controllable surround images for vision-based driving agents. The platform supports driving scenario construction from nuScenes and OSM; codes and tools are open-source for the community to use." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The author says that one of the main contributions is scalability, which means simulation on any region can be achieved by using map info from OSM. As far as I know, OSM only contains road-level map information, and extra efforts like completing lane topology and planning vehicle OD are needed to construct simulations based on it, this part of the work seems unclear in this paper.\n\nAs the dreamer is the most important part of this paper, it would be better if the author could provide some indicators that can directly evaluate the generated results, like FID.\n\nMinor note: it seems that Figure 3 is not in vector format." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "DriveArena is a pioneering closed-loop autonomous driving simulator based on conditional generative models for training and testing driving agents." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024drivearena,\ntitle={DriveArena: A Closed-loop Generative Simulation Platform for Autonomous Driving},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4S9bBbX1be},\nnote={under review}\n}" }, "abstract": { "value": "This paper introduces DriveArena, the first high-fidelity closed-loop simulation system designed for driving agents navigating real-world scenarios. DriveArena comprises two core components: Traffic Manager, a traffic simulator capable of generating realistic traffic flow on any global street map, and World Dreamer, a high-fidelity conditional generative model with infinite auto-regression. DriveArena supports closed-loop simulation using road networks from cities worldwide, enabling the generation of diverse traffic scenarios with varying styles. This powerful synergy empowers any driving agent capable of processing real-world images to navigate in DriveArena's simulated environment. Furthermore, DriveArena features a flexible, modular architecture, allowing for multiple implementations of its core components and driving agents. Serving as a highly realistic arena for these players, our work provides a valuable platform for developing and evaluating driving agents across diverse and challenging scenarios. DriveArena takes a significant leap forward in leveraging generative models for driving simulation platforms, opening new avenues for closed-loop evaluation of autonomous driving systems." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Autonomous Driving", "Diffusion Model", "Closed-loop Simulation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/f71f3b768df9f40d995082ce12f052f327c6cb3b.pdf" }, "presentation": null, "primary_area": { "value": "applications to robotics, autonomy, planning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/ca33a46441fac4362d7871fbf6ef590e90fffc06.zip" }, "title": { "value": "DriveArena: A Closed-loop Generative Simulation Platform for Autonomous Driving" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4Sv5MQ931E
MSR-ViR: Modularized Self-reflected Video Reasoner for Video Question Answering
main
Active
Video Question Answering;Multimodal LLM;Modular Network;Self-reflected Training
applications to computer vision, audio, language, and other modalities
5;5;5
3;4;5
3;3;4
3;2;3
3;3;3
5
4
3.333333
2.666667
3
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "None" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "To generate reasoning paths for VideoQA, would it be more effective to design a Chain-of-Thought dataset and perform supervised fine-tuning (SFT)? The O1 model currently adopts this approach, achieving clear reasoning paths through an end-to-end structure." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "1. The MoST-Grounding module introduces two standard modules—temporal localizer and spatial localizer—which can be flexibly assembled in sequence based on the question parser. This structure is robust and allows for reliable generation of reasoning paths.\n\n2. The authors present a clear motivation: to create a framework for generating reasoning paths for the black-box nature of VideoQA tasks. The comprehensive visualization of reasoning paths further demonstrates the effectiveness of the model." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper addresses the use of multimodal Large Language Models in understanding tasks across various multimodal scenarios, specifically focusing on applications in Video Question Answering. However, current Multimodal Large Language Models are largely black-box systems for VideoQA tasks, lacking the ability to provide an understandable reasoning path and, thus, suffer from limited interpretability. To address this, the authors propose MSR-ViR, which constructs a modular reasoning structure designed to generate reasoning paths, and incorporates a reinforcement learning framework to prevent the model from generating unreasonable reasoning paths.\n\nWhile the proposed approach is interesting, it relies on the integration of four existing models. This ensemble-based structure shows only marginal performance improvements (1-2%), and the manuscript does not discuss crucial aspects such as reasoning time costs or memory overhead." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The question parser and LLM reasoning components both rely on LLM structures, leading to high computational costs.\n\n2. Both the temporal localizer and spatial localizer use existing models, specifically UniVTG and YOLO-World, which contribute to significant parameter overhead. As the complexity of VideoQA tasks increases, relying on these two models alone may not only limit predictive accuracy but also compromise the completeness of reasoning paths. Future work may need to explore additional modules to support diverse combinations (see [1] for reference).\n\n3. The ablation study lacks comprehensiveness. 
While the authors assess model performance on QA and grounding tasks and provide an effectiveness analysis of each module, they do not evaluate inference speed, parameter count, or other metrics compared to end-to-end models. Given that the proposed framework integrates multiple existing large models, an analysis of inference speed is both important and currently missing.\n\n[1] Neural Module Networks" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "see weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1.\tSuccessfully develops a ground-then-answer framework for interpretable video question answering. The question parser policy can be optimized via answer feedback. \n\n2.\tThe approach is well presented and easy to understand." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper aims to improve the interpretability of Multimodal LLMs in performing VideoQA. To achieve this goal, the authors design a modular self-reflection framework, MSR-ViR. The framework primarily comprises a spatial-temporal grounding module and a self-reflection learning mechanism based on DPO. MSR-ViR basically decouples video grounding from VideoQA, enabling the interpretation of intermediate results to understand the answers. The experiments on related datasets have demonstrated the strength of the approach in both accuracy and interpretability (grounded accuracy)." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.\tThe paper does not improve video grounding but just uses the existing method UniVTG. According to Table 2, the grounding performance in terms of IoP@0.5 is worse than previous VLMs (VGT and TempCLIP). This severely limits the improvements in QA performance. \n2.\tAccording to the model ablation results in Table 3, the global representation g_v (which opens a back door for grounded QA) seems more crucial than other components. Such results slightly depart from the major claim of interpretable VQA where correct answers are anchored on correct visual content.\n3.\tThe paper should compare with SeViLA, which also finetunes a localizer on QVHighlight (like UniVTG) for grounded VideoQA." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed."
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See Weaknesses" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1) The paper is well written and the motivation is clear, with a strong focus on the interpretability challenge in VideoQA, making it highly relevant to the field.\n2) The MoST-Grounding module integrates multiple submodules to effectively localize temporal and spatial regions, improving transparency in the input to the MLLM.\n3) The Alternate Self-Reflection strategy introduces a novel reinforcement-based method to align the question parser and the MLLM, enhancing performance and consistency." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses the black-box problem of multimodal large language models (MLLMs) in VideoQA by proposing MSR-ViR, a novel framework. MSR-ViR introduces two core components: (1) the MoST-Grounding module, which localizes relevant temporal segments and spatial regions in videos, and (2) an Alternate Self-Reflection Training Strategy, which iteratively enhances the question parser and the MLLM. Evaluations on datasets such as NExT-QA, STAR, and NExT-GQA demonstrate that MSR-ViR achieves competitive performance on VideoQA tasks and improves interpretability by providing visual evidence for answers." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1) The framework is relatively heavy, relying on multiple external tools for tasks and additional operations such as resizing and truncation, which increases computational overhead. Moreover, what if these external tools are unreliable, which can lead to further exposure bias? It's necessary to further investigate the choice of the sub-modules in the MoST-Grounding module.\n2) While the approach improves the selection of input information, it does not make the internal reasoning process of the MLLM more interpretable. It still focuses on the process of 'input' to decide which information should be fed into the MLLM as soft prompts. \n3) The paper misses references with related works such as SeViLa [1] and GCG [2], which also focus on VideoQA with grounding elements. Including these baselines would strengthen the empirical validation.\n\n[1] Yu et al. \"Self-Chained Image-Language Model for Video Localization and Question Answering\", 2023 NIPS\n[2] Wang et al. \"Weakly Supervised Gaussian Contrastive Grounding with Large Multimodal Models for Video Question Answering\", 2024 ACM MM" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024msrvir,\ntitle={{MSR}-ViR: Modularized Self-reflected Video Reasoner for Video Question Answering},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4Sv5MQ931E},\nnote={under review}\n}" }, "abstract": { "value": "Recently, multimodal large language models (multimodal LLMs) have been applied to a wide range of video understanding tasks, particularly for Video Question Answering (VideoQA). 
However, existing multimodal LLMs suffer from the following challenge: the classic end-to-end training strategies of multimodal LLMs for VideoQA tasks are black-box, thus lacking interpretability as they can neither present a reasoning path nor indicate where the answer is derived from the video. To tackle this challenge, we propose MSR-ViR (Modularized Self-Reflected Video Reasoner), a self-reflected framework that introduces a Modularized Spatial-Temporal Grounding (MoST-Grounding) module to multimodal LLMs for VideoQA tasks. MoST-Grounding utilizes a question parser LLM to generate execution policies, which serve as a reasoning path from questions to answers providing interpretability for our VideoQA framework. Based on the execution policies, MoST-Grounding invokes various small modules to localize temporal segments and spatial regions in videos which provide multimodal LLMs with most relevant visual information, while presenting visual evidence of our final answers. To avoid the question parser LLM generating unreasonable policies, we further propose a reinforcement learning-based Alternate Self-reflection training strategy to optimize the Multimodal LLM and the question parser LLM. Experiments on VideoQA datasets (NExT-QA and STAR) and grounded VideoQA dataset (NExT-GQA) demonstrate that our method significantly improves video understanding capabilities of multimodal LLMs, while providing interpretable reasoning paths together with temporal and spatial localization evidence within the video." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Video Question Answering", "Multimodal LLM", "Modular Network", "Self-reflected Training" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/bc2578be11dcf758ee1e9d5318f4727faf706324.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "MSR-ViR: Modularized Self-reflected Video Reasoner for Video Question Answering" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4T33izzFpK
metabench - A Sparse Benchmark of Reasoning and Knowledge in Large Language Models
main
Active
llm;benchmarking;item response theory;factor analysis;information
datasets and benchmarks
5;6;6;6
2;3;2;3
2;4;3;4
2;4;3;3
2;3;3;4
5.75
2.5
3.25
3
3
0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "For quite a few models on the leaderboard, the MMLU score will be random chance (~25%, which you can see in Figure 1). Would it be a useful preprocessing step to subtract out random chance from the score and renormalize? E.g. take (score - 0.25) / (1 - 0.25)." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "The paper is well-written and clearly communicates its ideas and methods. In the choice of the IRT, multiple models and methods for estimating the ability are explored. The method proposed produces a much smaller benchmark which the authors demonstrate has better predictive power than randomly subsampling items (Figure 1B). Careful consideration is given to potential limitations of the method, including assumptions about the conditional independence of the LLMs used for the study. The work also considers the interesting idea of a benchmark that performs adaptive testing in which items are selected sequentially based on a current estimate of the model's ability.\n\nOverall I think the paper makes meaningful contributions to studying LLM benchmarks and making model evaluation more efficient, and I thus lean towards acceptance. However, I do think the benchmarks considered are missing some of the abilities that people seek to measure in LLMs (e.g. coding), somewhat limiting the work's impact. I seek to provide concrete suggestions regarding this in the next section." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper considers the six LLM benchmarks included in the Open LLM Leaderboard (ARC, GSM8K, HellaSwag, MMLU, TruthfulQA, and WinoGrande) and seeks to create a much smaller benchmark that is predictive of the original suite by subselecting items. This is done using data from more than 5000 LLMs included in the leaderboard and a pyschometric method called item response theory (IRT) which in essence fits a model that estimates the item's difficulty and how well the item discrimates between models whose \"abilities\" are close to the item's difficulty. (Note this model ability is also fit by the method in an alternating fashion.) The presented method results in a benchmark that is only 3% the size of the original benchmark but is able to effectively reconstruct both the original individual benchmark scores and the joint score. Finally, using factor analysis, the authors demonstate that a single latent is predictive of all 6 benchmarks." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "My comments in this section are not intended to be required changes to the paper but rather a discussion of what I think the authors could add to have more significant impact.\n\nCurrently the main output of the paper is a much smaller benchmark that can be used to efficiently rank models on the six benchmarks as well as evidence from factor analysis that all six benchmarks are measuring a single latent ability. However, across the broader field of LLM benchmarks, it is generally assumed that there are multiple latent dimensions to the abilities of LLMs. For example, if a code benchmark was added into the set, I would assume this would require another latent dimension to fit model performance, and it would be intriguing if this was not true! Also I would be curious if a larger fraction of the test items is required to reconstruct the scores when the set of included benchmarks require multiple latent ability dimensions to represent.\n\nIn essence, the most interesting direction I see for this work is to apply the methods to a more comprehensive set of benchmarks to try to discover latent ability dimensions that might be interpretable as what we think of as LLM capabilities. This should then also provide a characterization of which of these abilities each benchmark measures." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Will a small benchmark lead to a large variance in evaluation?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. This paper distills six prominent LLM benchmarks into a much smaller one with less than 3% of the size, which enables more streamlined and cost-effective evaluation methods;\n2. The new sparse benchmark yields estimators able to reconstruct the original benchmark score." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces metabench, a sparse benchmark distilled from six prominent benchmarks (ARC, GSM8K, HellaSwag, MMLU, TruthfulQA, and WinoGrande). Simple criteria, cross-validated subsampling, and information-based filtering are used to reduce the size of the benchmark. Original scores are reconstructed in a cross-validated manner." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "As mentioned in the limitations section, a smaller benchmark has the risk of being memorized." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Could authors elaborate on potential limitations when applying Metabench to other domains?\n2. How might Metabench handle scenarios where specific benchmarks assess unique skills not captured by a general latent factor?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "1. The paper’s technical approach is methodologically sound, with robust use of IRT and statistical modeling to identify informative items.\n2. It is well-organized, with a clear explanation of Metabench’s goals and psychometric techniques.\n3. It makes a substantial contribution to LLM evaluation, providing a novel, efficient, and scalable benchmarking solution." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces Metabench, a sparse benchmarking method designed to evaluate large language models (LLMs) with minimal redundancy and resource demands. By analyzing data from over 5000 LLMs across six benchmarks (ARC, GSM8K, HellaSwag, MMLU, TruthfulQA, and WinoGrande), Metabench distills these into a much smaller subset, reducing the combined item count by over 97%. Using psychometric techniques such as Item Response Theory (IRT), Metabench selects the most informative items, facilitating efficient and accurate evaluation while maintaining the integrity of the original benchmarks. The sparse benchmark achieves impressive fidelity, reconstructing original scores with less than 1.24% RMSE on average, and identifying a single common latent factor strongly correlating with general model ability." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The framework currently focuses on six benchmarks; additional work could explore its applicability across a broader range of LLM tasks or domains.\n2. Metabench’s dependence on psychometric models, especially IRT, could be limiting if these models do not fully capture the complexities of LLM behavior, as they were traditionally designed for human subjects." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. 
Could alternative item selection methods (beyond Fisher information) yield better results?\n2. How stable are the results across different random seeds and model subsets?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. Novel application of psychometric methods to LLM evaluation\n2. Impressive compression ratio (<3% of original size) while maintaining accuracy. Low reconstruction error (1.24% RMSE for individual benchmarks, 0.58% for total score).\n3. Comprehensive ablation studies and baseline comparisons. Thorough investigation of factor structure across benchmarks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes metabench, a compressed version of six popular LLM benchmarks (ARC, GSM8K, HellaSwag, MMLU, TruthfulQA, and WinoGrande) that achieves comparable evaluation capability while using less than 3% of the original items. The authors leverage psychometric techniques, particularly Item Response Theory (IRT), to identify the most informative test items and estimate latent abilities that can reconstruct original benchmark scores with high accuracy." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Memorization risks: \n (1) Smaller benchmark size increases memorization vulnerability\n (2) Proposed mitigation strategies need further validation\n2. Theoretical Assumptions:\n (1) IRT assumptions about LLMs need more justification\n (2) Independence assumptions between models may be violated due to shared architectures/training data" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "metabench is the data-driven distilled version of six popular LLM benchmarks, drastically reducing their size with minimal information loss" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024metabench,\ntitle={metabench - A Sparse Benchmark of Reasoning and Knowledge in Large Language Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4T33izzFpK},\nnote={under review}\n}" }, "abstract": { "value": "Large Language Models (LLMs) vary in their abilities on a range of tasks. Initiatives such as the $\\texttt{Open LLM Leaderboard}$ aim to quantify these differences with several large benchmarks (sets of test items to which an LLM can respond either correctly or incorrectly).\n However, high correlations within and between benchmark scores suggest that (1) there exists a small set of common underlying abilities that these benchmarks measure, and (2) items tap into redundant information and the benchmarks may thus be considerably compressed.\n We use data from $n > 5000$ LLMs to identify the most informative items of six benchmarks, $\\texttt{ARC}, \\texttt{GSM8K}, \\texttt{HellaSwag}, \\texttt{MMLU}, \\texttt{TruthfulQA}$ and $\\texttt{WinoGrande}$ (with $d=28,632$ items in total). From them we distill a sparse benchmark, \\texttt{metabench}, that has less than $3\\%$ of the original size of all six benchmarks combined. 
This new sparse benchmark goes beyond point scores by yielding estimators of the underlying benchmark-specific abilities.\n We show that these estimators (1) can be used to reconstruct each original \\textit{individual} benchmark score with, on average, $1.24\\%$ root mean square error (RMSE), (2) reconstruct the original \\textit{total} score with $0.58\\%$ RMSE, and (3) have a single underlying common factor whose Spearman correlation with the total score is $r = 0.94$." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "llm", "benchmarking", "item response theory", "factor analysis", "information" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/dc39dc3c648417a95ca13fd0f37e3b3c859d724e.pdf" }, "presentation": null, "primary_area": { "value": "datasets and benchmarks" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/b2ee3999e2552d5ae5a9fd8cd89f5e720281bae2.zip" }, "title": { "value": "metabench - A Sparse Benchmark of Reasoning and Knowledge in Large Language Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4UXIGATUTj
Forecasting Whole-Brain Neural Activity from Volumetric Video
main
Active
neuroscience;forecasting;video;lightsheet microscopy;zebrafish;calcium imaging;neuron activity
applications to neuroscience & cognitive science
3;5;8
4;4;3
2;2;4
2;3;3
1;3;3
5.333333
3.666667
2.666667
2.666667
2.333333
-0.917663
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Could \"scaling the field of view of the network so the size remains constant while increasing to full resolution\" make the full-resolution model handicapped in terms of field of view? There is a chance this is already answered in the paper but the reader missed it.\nOther than that, fixing the presentation issues in the previous section." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "The paper is original in the sense of creative combinations of existing ideas (the components of the model that enable scaling), application to a new domain (forecasting on minimally preprocessed / raw data from weather to neural data), as well as removing limitations from previous works (removing dependency on segmentation mask accuracy and avoiding loss of information caused by conversion to trace-matrix). Writing style is very high quality, and all the methods and results get across in a relatively clear manner to the reader. To the specific line of research (forecasting neural data), the method proposed seems to be a significant step forward in the field’s development, moving from hand-crafted to learnable features." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a novel method for neural activity forecasting of zebrafish that works on the raw volumetric video instead of using standard preprocessing that reduces the original 4D space to 1D (trace matrix) and disregards spatial relationships between neurons. To do this, a u-net architecture is employed, taking advantage of a large scale neural dataset and performing extensive ablations for model selection. Multiple measures were taken to enable scaling the architecture for this computationally expensive problem, such as using the temporal context dimension as input channels, lead-time conditioning, and distributed training. The ablation results show that (1) pretraining on other specimens does not help, (2) there is a trade-off between spatial and temporal context, and (3) that downsampling input resolution up to 4x is beneficial to performance. Compared to the best trace-matrix models, the proposed multivariate model achieves 10% reduction in MAE for the short context forecasting setting in both the test and the holdout sets, while it is comparable to the trace-matrix models for long-context forecasting in the test set and 12% better in the holdout set." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "There are some presentation issues that impact the understanding of a reader that is not very familiar to forecasting neural data. \n* These are mainly in the abstract and introduction, while once entering section 2 all misunderstandings of this reader were resolved. 
Nevertheless it would be beneficial for the paper to have them fixed. (1) Until section 2 it was not very clear that the goal is to predict future steps from previous steps, and not one neural modality (e.g. electrical signal) from another (e.g. blood oxygen). (2) It was not clear that the neuron segmentation mask is applied in both the trace models and the proposed model, but at different points in the pipeline, i.e. in the latter the forecasting itself is done on the volumetric video and afterwards the mask is applied before computing the error - without knowing this it is not clear how the two methods can be compared fairly. It would also help if in Figure 1 the same notation was used between orange (trace) and blue (proposed) in the segmentation mask block, i.e. instead of “Extract Neurons” and “Mask Neurons” say “Apply segmentation mask” in both cases. (3) In the abstract the phrase “we design a model to handle the high resolution and large receptive fields…” is structured in a confusing way where the reader does not understand if the large receptive fields are an aspect of the model or the recordings. (4) Minor - the footnote on page 2 is not so much footnote information, but rather more suitable for the main text.\n* Additionally, Figure 2 should have a more informative caption that explains better what is shown, e.g. it is not clear what the colored blobs are, segmentation masks?\n* Is H=32 the only setting that is tested and why? Not sufficiently described in the paper.\n* In the conclusion, calling the findings counterintuitive seems excessive; there is no reason to assume that high input resolution, pretraining, or increased model capacity works well for all domains and applications. Results showcase enough reasons why these sometimes useful settings might lead here to overfitting, distribution shifts, etc." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Major\n- It would be nice to test whether or not the trace-based model performed (relatively) poorly due to the lack of inter-cell activities or imperfect masking as suggested in Section 2 and Figure 2. I suggest applying the segmentation masks to the video, i.e. set regions outside of the identified cells as background, and train the video-based model on the masked videos. This should give us a sense of the influence of inter-cell activities and imperfect masking, and isolate the influence of spatial organization of the cells. \n- I suggest the authors include metrics that are commonly used in neural response prediction so that readers can have a sense of how well these models are performing, such as normalized correlation ($CC_\\text{norm}$) [1], or fraction of explainable variance explained (FEV) [2], basically metrics that take trial-to-trial variability into account.\n- Can the authors comment on the computational cost of the models? 
The authors stated in the hyperparameter search section and appendix A.3 that 16 A100 40GB GPUs are used to train the video-based model, and ~5k GPU hours (so ~300 hours in wall-time) was used in the loss ablation experiment in Figure 4, which is a considerable amount of computational time and cost. Can the authors share the time it took to train the final (best) video-based and trace-based models? I believe the authors should discuss the trade-off between the two approaches in computation cost if they are indeed substantially different. To clarify, I think it is fine for the method to be more computationally expensive than other methods, but it is important to point it out.\n\nMinor\n- What is the frame rate of the video? \n- Why and how are the two temporal context lengths (4 and 256) selected? Does it make sense to predict the future 32 frames from only 4 frames?\n- In the hyperparameters section and Figure 1, it is stated that the models optimize the trace-based MAE. Does this include the video-based model? Since the video-based model inputs and outputs a video, does it make a difference to optimize the recorded and predicted video MAE?\n- How are the hyperparameters selected? Hand-picked or via some form of hyperparameter search (random search, bayesian search, etc.)\n\n[1] Schoppe, Oliver, et al. \"Measuring the performance of neural models.\" Frontiers in computational neuroscience 10 (2016): 10.\n\n[2] Cadena, Santiago A., et al. \"Deep convolutional models improve predictions of macaque V1 responses to natural images.\" PLoS computational biology 15.4 (2019): e1006897." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- To my knowledge, neural response prediction/forecasting directly from volumetric video is novel. This allows minimal preprocessing of the data which can be beneficial to deep learning-based methods.\n- A wide range of training and evaluation conditions are compared, including trade-offs of spatial and temporal resolution, pre-training vs direct training, and training set size and combinations. These empirical results can guide future work in modeling neural responses." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a new approach to neuronal response modeling by predicting/forecasting the volumetric video instead of the per-neuron calcium trace (dF/F) or spike train, which is the norm in neural response prediction. This approach allows the model to take advantage of the inter-cell activity and spatial organization of the population that is typically discarded when deconvolving the volumetric video to individual response traces. The authors evaluated a range of video and trace-based models on ZAPBench and showed that the video-based model outperforms trace-based models in short temporal context length conditions." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Please find my suggestions for the following points in the Question section.\n- A key motivation of this work, as stated by the authors in Section 2, is that the typical deconvolution step to convert volumetric video to dF/F response traces can lead to loss of information, such as cell spatial organization, inter-cell activities, etc. 
However, while the video-based model appears to outperform the trace-based model in short temporal context-length conditions (though similar performance in longer context length), it is unclear whether or not this is due to the additional information that exists in the raw data and that the video-based model is indeed taking advantage of such information.\n- MAE might not be the most intuitive metric for getting a sense of how the (video and traced-based) models are performing. For instance, I am not sure if an MAE value of 0.02 is good or bad, or how big of a difference is an MAE of 0.02 to 0.04? In particular, I believe ZAPBench is a new dataset and we don’t have any other models to compare against these MAE values, other than the single trace-based model provided.\n- Unclear trade-off in computation cost between video-based and trace-based models." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1.On line 308, the authors refer to a \"segmentation mask,\" yet on line 68, they claim that their method can directly process 4D data. The authors are requested to clarify the apparent contradiction in their narrative.\n\n2.In the fourth contribution (line 108), the authors state that their proposed method is the \"only approach that consistently benefits from multivariate information.\" However, I did not encounter any experimental justification related to multivariate information in the main text. If such experiments were conducted, please direct me to the relevant sections within the paper.\n\n\n3.The description of temporal dimension processing on line 200 is unclear. I would like to confirm whether the authors' approach involves merging the temporal dimension with the batch dimension, such as transforming the data shape as follows: (batch, 2048, 1152, 72, T) --> (batch*T, 2048, 1152, 72). If not, please provide clarification on their methodology." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1.This paper employs a deep learning approach based on U-Net to directly process 4D neural activity recordings, circumventing complex preprocessing methods that may introduce performance degradation.\n\n2.A series of ablation studies have revealed practical insights into model pre-training and hyperparameter tuning." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This manuscript proposes the utilization of deep learning techniques for the prediction of neuronal activity recordings with fluorescent calcium, asserting superior performance over previous baselines. A series of ablation studies have revealed practical insights into model pre-training and hyperparameter tuning." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.The authors fail to elucidate the significance of this study anywhere within the main text, which raises questions about the necessity of forecasting whole-brain neuronal activity. The authors are encouraged to provide additional context in the abstract or introduction section.\n\n2.This paper primarily utilizes the initial frames of neuronal activity recordings with fluorescent calcium indicators to predict subsequent frames, representing an application of U-Net in a specific domain. It does not offer additional insights or novel perspectives to advance the field of artificial intelligence. Therefore, this manuscript would be more appropriately submitted to conferences or journals focused on neuroscience or medical image processing, as it does not align with the thematic scope of ICLR.\n\n3.The paper presents a limited comparison with only one baseline, namely the \"trace-based model,\" as shown in Figure 6. It raises the question whether ZAPBench, as a benchmark, evaluated only this single model type. The authors are encouraged to include additional baselines for comparison to substantiate the superiority of the proposed method." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a new method for forecasting neuronal activity using volumetric videos, leveraging spatial relationships between neurons and outperforming traditional trace-based methods." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024forecasting,\ntitle={Forecasting Whole-Brain Neural Activity from Volumetric Video},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4UXIGATUTj},\nnote={under review}\n}" }, "abstract": { "value": "Large-scale neuronal activity recordings with fluorescent calcium indicators are increasingly common, yielding high-resolution 2D or 3D videos. Traditional analysis pipelines reduce this data to 1D traces by segmenting regions of interest, leading to inevitable information loss. Inspired by the success of deep learning on minimally processed data in other domains, we investigate the potential of forecasting neuronal activity directly from volumetric videos: we design a model to handle the high resolution and large receptive fields necessary for capturing spatio-temporal dependencies in volumetric whole-brain recordings. We explore effects of pre-training and perform extensive model selection, analyzing spatio-temporal trade-offs for generating accurate forecasts. Our model outperforms trace-based forecasting approaches on ZAPBench, a recently proposed benchmark on whole-brain activity prediction in zebrafish, demonstrating the advantages of preserving the spatial structure of neuronal activity." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "neuroscience", "forecasting", "video", "lightsheet microscopy", "zebrafish", "calcium imaging", "neuron activity" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/d295120254e9b0dc0a9f6797374cb7e4326166b4.pdf" }, "presentation": null, "primary_area": { "value": "applications to neuroscience & cognitive science" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Forecasting Whole-Brain Neural Activity from Volumetric Video" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4UxXe3JZta
HRVMamba: High-Resolution Visual State Space Model for Dense Prediction
main
Active
Mamba;Dense Prediction;Human pose estimation;Semantic segmentation
applications to computer vision, audio, language, and other modalities
3;5;5;5
4;5;5;4
2;3;2;3
1;2;2;3
2;3;3;3
4.5
4.5
2.5
2
2.75
0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See the weakness above" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "HRVMamba demonstrates competitive or superior performance on COCO, Cityscapes, and PASCAL-Context benchmarks, often with fewer parameters and reduced computational load compared to similar models." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes HRVMamba, a High-Resolution Visual State Space Model designed for dense prediction tasks, such as human pose estimation and semantic segmentation. The paper addresses limitations in existing Mamba-based models, particularly Mamba’s low-resolution output and challenges in retaining long-range dependencies. To overcome these issues, the authors introduce the Dynamic Visual State Space (DVSS) block, which leverages multi-scale and deformable convolutions to improve inductive bias and mitigate long-range forgetting. By integrating these innovations within a high-resolution, multi-resolution parallel structure, HRVMamba achieves competitive results across dense prediction benchmarks compared to CNN, ViT, and SSM models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Limited Novelty: In my view, this paper incorporates techniques from CNN networks, such as DCNv4 and multi-resolution structures (from FPN and HRNet), into the Mamba block to enhance network performance. I am somewhat skeptical about whether such an innovation alone is sufficient for a publication at ICLR." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to the weakness part." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- Empirical results indicate that HRVMamba outperforms contemporary CNNs, ViTs, and SSMs on benchmarks, delivering competitive performance with fewer computational resources.\n- The figures in the paper are clean and aesthetically pleasing, which enhances readability." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces HRVMamba, a high-resolution visual state space model tailored for dense prediction tasks. It builds on the Mamba framework, a hardware-efficient state space model (SSM) known for linear computational complexity. The authors highlight limitations in previous visual Mamba models—namely, insufficient inductive bias, long-range forgetting, and low-resolution outputs. To overcome these, HRVMamba incorporates the Dynamic Visual State Space (DVSS) block, combining multi-scale convolutional kernels and deformable convolution for enhanced local and long-range feature extraction. The HRVMamba model employs a multi-resolution parallel structure inspired by HRNet, preserving high-resolution representations and facilitating multi-scale feature learning." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Limited Novelty: The HRVMamba model mainly combines existing methods, including the VSS block, DWConv, DCN, and the HRNet architecture. This integration-based approach may not meet ICLR’s high standards for innovation.\n- Concern for `Limitation 1`: While the paper addresses the lack of 2D inductive bias in previous visual Mamba models, it raises concerns about whether introducing such bias could restrict the **scaling ability** of HRVMamba. Vision Transformers (ViTs) have demonstrated that reduced inductive bias can facilitate better scaling, so incorporating strong inductive biases might limit HRVMamba's scalability and performance on larger-scale models.\n- Concern for `Limitation 2`: The paper uses Deformable Convolutions (DCN) to mitigate the long-range forgetting issue observed in previous visual Mamba models. However, there is a concern about whether DCN can effectively address this problem as the sequence length scales up. The efficacy of DCN for maintaining high-level feature relationships over significantly longer sequences remains uncertain, raising questions about its robustness as a scalable solution for long-range dependencies.\n- The paper references preprints and arXiv versions of significant works, such as Mamba (COLM), Vision Mamba (ICML), and VMamba (NeurIPS). The authors should update these citations to their final published versions to reflect the current state of the literature." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "N/A" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. The proposed HRVMamba add a multi-scale DW block and I'm concerned how about the performance of dropping the FFN block." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper presents the Dynamic Visual State Space (DVSS) block, which combines multi-scale convolutional kernels and deformable convolutions.\n2. 
This paper proposes the High-Resolution Visual State Space Model (HRVMamba) based on the DVSS block and the architecture of HRNet.\n3. The proposed HRVMamba obtains improvements compared to previous approaches on several dense prediction tasks" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces HRVMamba, a High-Resolution Visual State Space Model designed for dense prediction tasks such as human pose estimation and semantic segmentation. HRVMamba addresses the limitations of previous Mamba models by incorporating a Dynamic Visual State Space (DVSS) block, which uses multi-scale convolutional kernels to enhance inductive bias and deformable convolutions to mitigate long-range forgetting. The model is based on a multi-resolution parallel design, preserving high-resolution representations throughout the network to facilitate effective multi-scale feature learning. Extensive experiments demonstrate HRVMamba's competitive performance against existing CNN, ViT, and SSM benchmark models on various dense prediction tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. This paper's novelty is limited. It incorporates the VMamba block[1] into the HRNet architecture[2], including DCNv4, making it lack sufficient novelty and insights. In addition, similar ideas have been explored in HRFormer[3,4]. The novelty of this paper is below the threshold for ICLR publication.\n2. The motivations for using DCNv4 and the DVSS block in this paper are unclear. For example, this paper lacks a specific analysis of the long-range forgetting problem of VMamba in vision and why DCNv4 can solve such a long-range forgetting problem of Mamba. From either a theoretical or an experimental perspective, the authors need to provide concrete evidence.\n3. Lack of comparisons with recent vision Mamba works, such as MambaVision[5].\n4. In Fig.1, why are there neat blocks in the activation map? How can this be explained? Is it related to the scans in different directions of VMamba? Image activation is usually continuous, and I'm confused about it.\n5. How do the feature maps of different resolutions fuse, downsample, or upsample?\n6. How about the inference latency of the proposed HRVMamba, which includes VMamba blocks, multi-scale depthwise convolution blocks, and DCNv4 blocks?\n7. In Tab.7, adding a 3x3 convolution has no effect. However, adding larger depth-wise convolutions, such as 5x5, 7x7, or 9x9, improves a little (0.3 AP), but this also introduces many additional parameters. It's unclear here whether the effect comes from extra parameters, larger convolutions, larger receptive fields, or multi-scale convolutions.\n8. The performances of baseline methods (such as HRFormer) on Cityscapes and PASCAL Context are too low, which is far from consistent with the original paper[3]; for example, HRFormer-B obtains 82.6 mIoU (Cityscapes) and 58.5 mIoU (PASCAL Context) while achieving 77.3 mIoU (Cityscapes) and 42.6 mIoU (PASCAL Context) in this paper. I think a fair comparison is very important. However, the results of the current comparison methods are obviously much lower than those of the original methods. \n\nReferences\\\n[1] Liu et al. VMamba: Visual State Space Model. NeurIPS 2024.\\\n[2] Wang et al. Deep High-Resolution Representation Learning for Visual Recognition. TPAMI 2020.\\\n[3] Yuan et al. HRFormer: High-Resolution Transformer for Dense Prediction. NeurIPS 2021.\\\n[4] Gu et al. 
Multi-Scale High-Resolution Vision Transformer for Semantic Segmentation. CVPR 2022.\\\n[5] Hatamizadeh et al. MambaVision: A Hybrid Mamba-Transformer Vision Backbone. NeurIPS 2024." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Why does the performance of HRFormer-B on the segmentation datasets significantly lag behind the reported results? For example, HRFormer-B + OCR achieves a mIoU of 82.6 on Cityscapes and 58.5 on PASCAL-Context datasets, respectively. However, the performance drops to 77.3 and 42.6 in Table 5, Lines 349-350." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper first introduces Vision Mamba to a multi-resolution branch architecture. Additionally, several techniques are introduced to alleviate the limitations of Vision Mamba, including deformable convolution and multi-kernel size convolution. These techniques provide improvements over the vanilla Vision Mamba baseline. The proposed methods are easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper summarizes the current challenges encountered when applying Vision Mamba to dense prediction tasks, including insufficient inductive bias, long-range forgetting, and low-resolution output representation. Subsequently, the authors propose corresponding solutions, including multi-scale convolution kernels, deformable convolution, and the HRNet framework to alleviate these issues." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. This paper feels more like a technical report than an academic paper. The challenges of Vision Mamba models are obtained from previous works, and the solutions are also based on current techniques. Besides, some observations and methods are not properly cited. For example, the problem that the multi-way scanning approach disrupts 2D spatial dependencies has been raised in MambaIR[1], and the solution of the Multi-scale Depthwise block has also been introduced in SMT[2]. Given these considerations, this paper does not introduce any new insights or techniques to the community.\n\n2. The experiments are not comprehensive enough. For example, for semantic segmentation, only results on Cityscapes and PASCAL Context are reported. The results on the widely used ADE20K are missing. The authors may consider reporting results with the same framework, such as UperNet, on ADE20K and comparing with publicly available results such as VMamba. Complementing these results and comparing with publicly available results will make this paper more solid.\n\n3. Some experimental data lack further explanation. In Table 7, the authors report the performance on the COCO val set. As the COCO dataset has many benchmarks, this \"val\" set is ambiguous. 
If it refers to the pose estimation, the best results here are 74.2, which is not coherent with the results in Table 3. Additional explanation for the setting differences is needed. Besides, there are some very weird data such as the one in L412. The authors mentioned the poor performance of LocalVMamba on the PASCAL-Context dataset, but no further explanation for the reason is provided.\n\n4. The detailed efficiency comparisons are missing, including the inference speed and memory cost on different datasets.\n\n5. Some minor errors: In L213, Fig.3 refers to Fig.2 by mistake. Besides, the contents from L445 to L450 are not appropriate for the ablation section.\n\n6. This work does not give any discussion about the limitations.\n\nReference:\n\n[1]. Guo, Hang, et al. \"MambaIR: A Simple Baseline for Image Restoration with State-Space Model.\" arXiv e-prints (2024): arXiv-2402.\n\n[2]. Lin, Weifeng, et al. \"Scale-aware modulation meet transformer.\" Proceedings of the IEEE/CVF International Conference on Computer Vision. 2023." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024hrvmamba,\ntitle={{HRVM}amba: High-Resolution Visual State Space Model for Dense Prediction},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4UxXe3JZta},\nnote={under review}\n}" }, "abstract": { "value": "Recently, State Space Models (SSMs) with efficient hardware-aware designs, \\ie, Mamba, have demonstrated significant potential in computer vision tasks due to their linear computational complexity with respect to token length and their global receptive field. However, Mamba's performance on dense prediction tasks, including human pose estimation and semantic segmentation, has been constrained by three key challenges: insufficient inductive bias, long-range forgetting, and low-resolution output representation.\nTo address these challenges, we introduce the Dynamic Visual State Space (DVSS) block, which utilizes multi-scale convolutional kernels to extract local features across different scales and enhance inductive bias, and employs deformable convolution to mitigate the long-range forgetting problem while enabling adaptive spatial aggregation based on input and task-specific information. By leveraging the multi-resolution parallel design proposed in HRNet, we introduce High-Resolution Visual State Space Model (HRVMamba) based on the DVSS block, which preserves high-resolution representations throughout the entire process while promoting effective multi-scale feature learning.\nExtensive experiments highlight HRVMamba's impressive performance on dense prediction tasks, achieving competitive results against existing benchmark models without bells and whistles.\nWe will make the source code publicly accessible." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Mamba", "Dense Prediction", "Human pose estimation", "Semantic segmentation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/43b8895c02f64825cd898821c60038dc64c54e1f.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "HRVMamba: High-Resolution Visual State Space Model for Dense Prediction" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4VHiptx7xe
STRAP: Robot Sub-Trajectory Retrieval for Augmented Policy Learning
main
Active
dynamic time warping;few-shot imitation learning;retrieval;foundation models
applications to robotics, autonomy, planning
3;5;5;6
4;4;4;4
2;3;2;3
3;2;3;3
1;3;3;3
4.75
4
2.5
2.75
2.5
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "None" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Could STRAP be combined with fine-tuning of MT policies on the retrieved dataset to potentially achieve even better performance than domain-specific fine-tuning alone?\n\n- How does STRAP's performance compare against standard fine-tuning approaches when controlling for the total amount of data used?\n\n- What are the memory and computational requirements for deploying STRAP on very large trajectory datasets like Droid?\n\n- Can you add the average performance for LIBERO-10 results to the main table?\n\n- Can you provide a few more real world tasks with required baselines?\n\n- Real world retrieveal is conducted with the same robot embodiment and gripper. How does STRAP Perform when retrieving similar data from other robot datasets like BridgeV2 that does not share the same robot and scenes?\n\nSTRAP presents an novel and well designed approach to few-shot learning, that tackles several drawbacks of prior methods through sub-trajectory retrieval with dynamic time-warping. However, more comprehensive comparisons against fine-tuned baselines and clearer analysis of computational requirements would strengthen the paper's contributions. Thus, I recommend weak reject pending addressing the following concerns: (1) comparisons against fine-tuned baselines, (2) clearer analysis of computational requirements, and (3) better justification of parameter choices." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The proposed method is well motivated and achieves strong results across multiple experiments, both in simulation and real-world settings\n\n- The use of Dynamic Time Warping for sub-trajectory matching is novel and well-suited for the problem domain\n\n- Comprehensive evaluation against recent retrieval baselines demonstrates the method's effectiveness\n\n- Thorough ablation studies on different pretrained encoders provide valuable insights into architecture choices\n\n- The paper is well written and includes several illustrative figures that enhance the text" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces STRAP, a novel method for trajectory retrieval to find similar sub-trajectory segments in large-scale datasets for efficient policy learning in few-shot settings. The method's key contribution lies in combining pretrained visual encoders with Dynamic Time Warping to encode sub-trajectories of variable lengths for improved retrieval. The proposed method is tested against several retrieval baselines and BC ones on LIBERO-10 simulation and real world pick and place tasks and achieves good performance." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The baseline comparison against multi-task policy appears weak, as it only uses pretrained weights without fine-tuning. This seems like an artificially weak baseline since fine-tuning is standard practice for all MT-policies. \n\n- The paper's argument that retrieval is more efficient than expensive pretraining needs stronger empirical support, especially given that the robotics community regularly fine-tunes general policies for downstream tasks\n\n- The computational cost of STRAP's retrieval process on large-scale datasets like Droid is not adequately addressed, raising questions about real-world scalability. Some more clarity is necessary here\n\n- The choice of K for constructing D-retrieval lacks sufficient explanation and ablations. The paper should explore how different K values affect both retrieval quality and computational overhead and policy performance, as this parameter likely presents a trade-off between performance and efficiency. A discussion about the retrieved data quantity would provide valuable insights and strengthen then paper. \n\n- Small number of tested tasks in real world setting and missing baselines of MT policy and finetuned MT policy" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Baseline: how does STRAP compare to the pretrain-then-finetune setup? (Pretrain on the prior dataset, then fine-tune on the few-shot target demonstrations?)\n- Baseline: how does STRAP compare to a multitask policy trained on all available data?\n- Generalization: to what extent does STRAP generalize across environments or embodiments?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Sub-trajectory retrieval for the few-shot demo behavioral cloning setting is a well-motivated and novel idea.\n- The method is clear and straightforward to implement.\n- Results show that matching with DTW on vision foundation model features are robust to variations and capture task semantics.\n- Real and simulated environments show that STRAP outperforms other retrieval methods and pure behavioral cloning." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper focuses on the setting of generalizing a policy to an unseen task with few-shot demonstrations. Instead of deploying zero-shot, the paper proposes STRAP, training a model on task-relevant data augmented by retrieval. STRAP first retrieves sub-trajectories from a large pretraining dataset that are similar to the new task demonstrations, then combines them with the few-shot demos to train a policy. Results on sim and real environments show that STRAP outperforms other retrieval methods and pure behavioral cloning. 
Ablations show that STRAP is compatible with various vision encoders and justify each of its components." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- STRAP requires few-shot demos and model training at test time for a new task.\n- It would be good to see more sim and real environments for evaluations.\n- It would be more convincing to see a behavioral cloning baseline that uses all available data." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Compared to the prior framework (Behavior Retrieval, Du et al., 2023), STRAP seems to have three main differences in its retrieval system: (a) non-parametric retrieval vs. a VAE, (b) sub-trajectory-wise retrieval vs. single state-action pairs, and (c) the use of DTW. Among these, what gives the most / least performance gains?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- To deal with potentially variable lengths during retrieval, STRAP uses dynamic time warping (DTW) to match the sub-sequences.\n- STRAP shows improved performance compared to the prior framework (Behavior Retrieval, Du et al., 2023), which retrieves single state-action pairs using a VAE." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors propose a task-specific robot learning framework using pre-collected datasets. Unlike the many robot learning methods that train a generalist policy model with multi-task expert data, the proposed method (STRAP) trains task-specific policies, which can yield better performance on a single task. When few-shot target demos are given in addition to the prior dataset, STRAP filters the task-relevant data from the prior data and uses it together with the target demos to train the model. One of the key features of STRAP is that it retrieves data by measuring the similarity between sub-trajectories rather than whole trajectories. Also, it utilizes subsequence dynamic time warping (S-DTW) to match the data. As a result, the proposed method shows improved performance compared to the previous methods, generalist policy models, and specialist models that only use the target data." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The idea of using only data relevant to the target task, rather than learning a generalist policy through multi-task data, is interesting. However, retrieving new data from the prior dataset and training a policy each time a new scene is encountered is highly computationally costly.\n- Comparing the entire prior data with the target data one-to-one to measure similarity is not scalable with the dataset size.
Moreover, since this retrieval process requires computationally intensive neural network operations, such as DINO, it raises questions about whether this process can be performed at test time. In particular, there is no mention of how to handle an increase in offline dataset size, nor are there any discussions about limitations in this regard.\n- There is no discussion about computational cost.\n- STRAP uses a top-k retrieval dataset. Increasing this k could bring in more data but might reduce relevance, whereas a smaller k would provide more refined data but with a smaller amount. However, there is a lack of analysis on how changing this k value affects performance." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. Would you please update the sites in the paper?\n2. Could you please add some experiments with other imitation learning methods?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. **Innovative Approach**: The authors present a compelling intuition, demonstrating robustness in solving multitask generalization challenges.\n2. **Efficient Data Usage**: The method shows improvement in the way data is leveraged for robotics tasks, particularly in sub-trajectory retrieval.\n3. **Thorough Experiments**: The experiments are detailed and show promising results for sub-trajectory retrieval and policy creation." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "## Paper Review Summary\n\nThis work advocates for training policies dynamically during deployment, utilizing the encountered scenarios to improve model performance. Instead of relying on pre-trained policies to tackle new problems in a zero-shot fashion, the authors propose a non-parametric approach that retrieves relevant data and trains models directly at test time. The paper introduces STRAP, a method built on pre-trained Vision-Language Models (VLM) and dynamic time warping, which combines sub-trajectories into a policy. The approach involves some training on a set similar in language to the test set, and has been demonstrated in both real-world and simulated environments.\n\n### Strengths:\n1. **Innovative Approach**: The authors present a compelling intuition, demonstrating robustness in solving multitask generalization challenges.\n2. **Efficient Data Usage**: The method shows improvement in the way data is leveraged for robotics tasks, particularly in sub-trajectory retrieval.\n3. **Thorough Experiments**: The experiments are detailed and show promising results for sub-trajectory retrieval and policy creation.\n\n### Weaknesses:\n1. **Project Incompleteness**: There is no accessible website or supplementary information, suggesting the project might still be unfinished.\n2.
**Visual Readability**: The images in the paper are difficult to interpret, potentially detracting from the clarity of the results.\n3. **Writing Quality**: The paper's writing needs improvement, especially in terms of clarity and readability.\n4. **Generalization**: Some imitation learning methods, such as [Sparse Diffusion Policy](https://forrest-110.github.io/sparse_diffusion_policy/), [HPT](https://liruiw.github.io/hpt/), [RDT-Robotics](https://rdt-robotics.github.io/rdt-robotics), and [Humanoid Manipulation](https://humanoid-manipulation.github.io/), appear to show more generalization in similar settings.\n5. **Formatting**: The paper's formatting is problematic, with certain sections being hard to read, affecting the overall readability of the work.\n\nIn conclusion, while the proposed method shows strong intuition and detailed experimentation, there are concerns about project completeness, readability, and potential improvements in both writing and generalization when compared to existing work. \n\nI would like to change the rate after discussion, but at least you should finish the site you provide." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. **Project Incompleteness**: There is no accessible website or supplementary information, suggesting the project might still be unfinished.\n2. **Visual Readability**: The images in the paper are difficult to interpret, potentially detracting from the clarity of the results.\n3. **Writing Quality**: The paper's writing needs improvement, especially in terms of clarity and readability.\n4. **Generalization**: Some imitation learning methods, such as [Sparse Diffusion Policy](https://forrest-110.github.io/sparse_diffusion_policy/), [HPT](https://liruiw.github.io/hpt/), [RDT-Robotics](https://rdt-robotics.github.io/rdt-robotics), and [Humanoid Manipulation](https://humanoid-manipulation.github.io/), appear to show more generalization in similar settings.\n5. **Formatting**: The paper's formatting is problematic, with certain sections being hard to read, affecting the overall readability of the work." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Subsequence-DTW for sub-trajectory retrieval to augment few-shot policy learning" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024strap,\ntitle={{STRAP}: Robot Sub-Trajectory Retrieval for Augmented Policy Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4VHiptx7xe},\nnote={under review}\n}" }, "abstract": { "value": "Robot learning is witnessing a significant increase in the size, diversity, and complexity of pre-collected datasets, mirroring trends in domains such as natural language processing and computer vision. Many robot learning methods treat such datasets as multi-task expert data and learn a multi-task, generalist policy by training broadly across them. Notably, while these generalist policies can improve the average performance across many tasks, the performance of generalist policies on any one task is often suboptimal due to negative transfer between partitions of the data, compared to task-specific specialist policies. 
In this work, we argue for the paradigm of training policies during deployment given the scenarios they encounter: rather than deploying pre-trained policies to unseen problems in a zero-shot manner, we non-parametrically retrieve and train models directly on relevant data at test time. Furthermore, we show that many robotics tasks share considerable amounts of low-level behaviors and that retrieval at the \"sub\"-trajectory granularity enables significantly improved data utilization, generalization, and robustness in adapting policies to novel problems. In contrast, existing full-trajectory retrieval methods tend to underutilize the data and miss out on shared cross-task content. This work proposes STRAP, a technique for leveraging pre-trained vision foundation models and dynamic time warping to retrieve sub-sequences of trajectories from large training corpora in a robust fashion. STRAP outperforms both prior retrieval algorithms and multi-task learning methods in simulated and real experiments, showing the ability to scale to much larger offline datasets in the real world as well as the ability to learn robust control policies with just a handful of real-world demonstrations." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "dynamic time warping", "few-shot imitation learning", "retrieval", "foundation models" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/624af64bda2bf4edc4459d3c8fc251d06f58b723.pdf" }, "presentation": null, "primary_area": { "value": "applications to robotics, autonomy, planning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "STRAP: Robot Sub-Trajectory Retrieval for Augmented Policy Learning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4VNfufHtoS
Test-time Correction with Human Feedback: An Online 3D Detection System via Visual Prompting
main
Active
Autonomous Driving;3D Object Detection;Test-time Error Correction
applications to computer vision, audio, language, and other modalities
3;3;5;5;6;6
4;4;5;3;3;3
3;2;2;2;3;3
2;1;2;2;3;3
2;2;3;3;3;3
4.666667
3.666667
2.5
2.166667
2.666667
-0.478091
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "It is recommended to include the full term \"Online Adapter (OA)\" the first time OA is mentioned in the abstract." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. Model flexibility: Accepts both monocular and multi-view data and supports any combination of various prompts (object, box, point, and novel visual prompts).\n2. Clarity of writing: The paper is well-written, logically structured, and easy to read.\n3. Extensive experiments: The main text and supplementary materials provide ample experiments to validate the effectiveness of TTC.\nPractical feasibility: The authors explain real-world application scenarios, achieving immediate error rectification through user-friendly prompts." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors introduce the Test-time Correction (TTC) system, an innovative online 3D detection framework designed for correcting test-time errors in real-time through human feedback. TTC demonstrates the capability for immediate error rectification. Extensive experiments show substantial improvements in real-time error correction over pre-trained 3D detectors, even in challenging scenarios involving limited labels, zero-shot detection, and adverse conditions." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. OA is one of the core modules of the model, serving as a bridge between prompts and offline-trained 3D detectors. However, the explanation of OA in the method section is somewhat abstract; adding simple illustrative diagrams could aid understanding.\n2. In the Related Work section, the Online 3D Detection System subsection discusses online 3D detectors. Expanding on offline 3D detectors would help readers better understand the development of offline versus online 3D detection.\n3. There are some minor typos in the text that need correction." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. The paper does not clearly present how the proposed modules are trained, especially for the two layers MLP in key online adapter module." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The designed validation experiments are comprehensive.\n2. The proposed method is training-free, which makes it can be broadly applied." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a 3D object adaptation method that could adapt the human feedback online. The system could work with various visual prompts including reference images, box in the image and click in the image. The proposed methods is validated on both in domain and out of domain dataset, and demonstrating its effectiveness in these scenarios." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The proposed can only handle missing objects. It seems it cannot reduce the FP in test time.\n2. Although the authors explain the differences between proposed TTC and single object tracking, the explanation is unconvincing. The visual and object prompt can be easily used in SOT setting. Such discussion and at least comparisons with bbox annotations are must. This is the key concern in my evaluation." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. How are the visual prompts used during testing obtained?I'm not sure if it's just adding the corresponding areas of undetected targets to the visual cue buffer. \n2. Could the TTC method be combined with LLM-based 3D detection approaches to enhance generalization for novel object categories and domain shifts?\n3. How does the TTC method handle potential noise and latency in user feedback?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. Clear and well-structured writing, and easy to understand.\n2. Significant performance improvements across multiple 3D detectors and comprehensive ablation studies validate module effectiveness.\n3. The OA module is simple but effective." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a Test-time Correction (TTC) method that leverages human feedback to correct errors in real-time during testing. The core component, the Online Adapter (OA) module, enables existing 3D detectors to use visual prompts for continuously detecting previously undetected 3D objects." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Re-entering the regions of undetected targets as visual prompts introduces human knowledge, which may lead to potential biases and affect the fairness of comparative experiments.\n2. 
The TTC method needs to maintain a buffer of visual cues and solve the matching problem between cues and target objects, which increases the complexity.\n3. The experimental section lacks a description of how the visual prompts used during testing are obtained." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "\"To achieve such TTC system, we equip existing 3D detectors with OA module, an online adapter with prompt-driven design for online correction.\" However, the acronym \"OA\" is not defined in the abstract." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The broader impact of this paper may lie in inspiring the research community to further investigate the online rectification approach in autonomous driving systems. This crucial technology has the potential to significantly enhance the safety and reliability of safety-critical applications." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents the Test-time Correction (TTC) system, a novel online 3D detection framework designed to correct test-time errors through human feedback. This approach aims to enhance the safety of deployed autonomous driving systems.\n\nThe key idea of the TTC system is to improve existing 3D detectors with the Online Adapter (OA) module, a prompt-driven design that facilitates real-time error correction. Central to the OA module are visual prompts—images of missed objects of interest that guide corresponding detection and subsequent tracking. These visual prompts are stored in a visual prompt buffer to enable continuous error correction in subsequent frames. This approach allows the TTC system to consistently detect missed objects in real-time, thereby effectively reducing driving risks.\n\nExperimental results show that the proposed method, through test-time rectification, enhances the performance of offline monocular detectors (Zhang et al., 2022a), multi-view detectors (Wang et al., 2023c), and BEV detectors (Yang et al., 2023a) without the need for additional training." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper lacks self-containment. For example, in lines 217-231, where the authors describe various visual prompts, they mainly reference numerous other methods without offering sufficient detail. This heavy reliance on external sources renders the paper somewhat incremental, as it fails to clearly articulate the novel contributions and context of the visual prompts within the proposed framework. 
Furthermore, this lack of clarity results in the use of many notations, such as \"visual features\" and \"image features,\" without providing clear definitions.\n\n\nRather than referring to it as \"visual prompts,\" the pipeline developed in this paper essentially provides a template containing location and size information in the buffer, enabling generic tracking during test time without any additional training. Therefore, the authors are encouraged to clarify whether this pipeline fundamentally differs from a single-object tracker. Additionally, it would be beneficial to include an experiment comparing state-of-the-art (SOTA) trackers for test-time correction as part of the evaluation" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to the weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. This paper is well written, and easy to understand.\n\n2. Enhancing 3D detection is important task for autonomous driving.\n\n3. The performance gain is impressive." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents the Test-time Correction (TTC) system, an online 3D detection framework designed to correct test-time errors through real-time human feedback. Unlike conventional offline static 3D detectors, TTC aims to learn real-time error rectification by incorporating user feedback (e.g., clicks or bounding boxes). This approach allows for immediate updates to detection results for subsequent streaming inputs, even when the model operates with fixed parameters. The TTC system is achieved by integrating an OA module, an online adapter with a prompt-driven architecture, into existing 3D detectors for real-time correction. The key is visual prompts, specifically images of missed objects, which guide both current detection and future tracking. These visual prompts, representing objects missed during inference, are stored in a buffer to support continuous error correction across frames. Extensive experiments reveal substantial improvements in immediate error rectification compared to pre-trained 3D detectors, even under limited labeling, zero-shot detection, and challenging environmental conditions." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Motivation\n\nPretrained static 3D detection modules have clear limitations due to issues like domain gaps, and I agree with the goal of improving these. However, I find it difficult to fully empathize with the motivation behind TTC. 
If issues in the predictions of the 3D detection module indeed have a significant impact on safety, as stated, would there realistically be an opportunity to perform online correction in such scenarios?\n\nAdditionally, I am skeptical about the feasibility of interventions like visual prompting during driving. Operating devices such as navigation systems manually while driving is likely a legal violation in most countries, and in practice, the difficulty level for performing such tasks during driving seems exceedingly high.\n\n2. Comparison with TTA or TTT\n\nIn this field, there are various approaches for online improvement of pre-trained static models, such as test-time adaptation (TTA) and test-time training (TTT). Notably, most of these methods function without the need for human feedback. A thorough methodological and performance comparison with these approaches is essential. Additionally, while TTT may be somewhat challenging, in the case of TTA, it seems feasible to utilize human feedback as direct learning guidance. I would appreciate a more in-depth discussion on this aspect as well.\n\n3. Robustness\n\nIt is unrealistic to expect that user corrections will always be accurate. Depending on the situation, incorrect user interventions could potentially worsen the proposed TTC. It would be beneficial to model the noise that might exist in visual prompting and demonstrate that TTC can operate robustly even when this noise becomes more pronounced." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Please refer to the Paper Weaknesses mentioned above.\n2. The experimental section conducts multiple tasks only on MonoDETR; however, multi-view 3D detection is currently the mainstream approach in autonomous driving solutions. It is recommended to include experiments on mainstream multi-view 3D detectors across various tasks.\n3. The proposed method focuses solely on correcting missed detections, yet false positives are also a significant issue in autonomous driving. Is there scalability for correcting false positives?\n4. The experimental section lacks comparisons with existing instruction-based 3D detection methods, which typically utilize text, boxes, or clicks as prompts; it is recommended to include such comparisons." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. This paper is clearly structured and well written.\n2. This paper focuses on an interesting issue, namely an online 3D detection system designed for online correction of test-time errors via human feedback." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces the Test-time Correction (TTC) system, an online 3D detection system designed for online correction of test-time errors via human feedback.
The proposed TTC system includes two components: Online Adapter (OA) that enables 3D detectors with visual promotable ability, and a visual prompt buffer that records missing objects. Experiments were conducted on the nuScenes dataset, focusing on the TTC system across various 3D detectors and in out-of-training-distribution scenarios." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The rationale behind this task setup requires further discussion. Since the proposed task involves human feedback, this process is often uncontrollable, making it challenging to ensure real-time performance. This limitation affects the feasibility of applying the task in real-world autonomous driving scenarios.\n2. The rationale behind the EDS evaluation metric requires further discussion. Classification accuracy is also crucial for autonomous driving, and focusing solely on localization performance while neglecting classification performance is not realistic.\n3. The proposed method is only applicable to vision-based autonomous driving solutions, limiting its generalizability to LiDAR-based autonomous driving systems." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "This paper introduces Test-time Correction (TTC), a novel online 3D detection system designated for swift correction of test-time errors via human feedback to ensure the reliance on safety-critical autonomous driving systems." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024testtime,\ntitle={Test-time Correction with Human Feedback: An Online 3D Detection System via Visual Prompting},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4VNfufHtoS},\nnote={under review}\n}" }, "abstract": { "value": "This paper introduces Test-time Correction (TTC) system, a novel online 3D detection system designated for online correction of test-time errors via human feedback, to guarantee the safety of deployed autonomous driving systems. Unlike well studied offline 3D detectors frozen at inference, TTC explores the capability of instant online error rectification. By leveraging user feedback with interactive prompts at a frame, e.g., a simple click or draw of boxes, TTC could immediately update the corresponding detection results for future streaming inputs, even though the model is deployed with fixed parameters. This enables autonomous driving systems to adapt to new scenarios flexibly and decrease deployment risks reliably without additional expensive training. To achieve such TTC system, we equip existing 3D detectors with OA module, an online adapter with prompt-driven design for online correction. At the core of OA module are visual prompts, images of missed object-of-interest for guiding the corresponding detection and subsequent tracking. Those visual prompts, belonging to missed objects through online inference, are maintained by the visual prompt buffer for continuous error correction in subsequent frames. By doing so, TTC consistently detects online missed objects and immediately lowers down driving risks. It achieves reliable, versatile, and adaptive driving autonomy. Extensive experiments demonstrate significant gain on instant error rectification over pre-trained 3D detectors, even in challenging scenarios with limited labels, zero-shot detection, and adverse conditions. 
We hope this work would inspire the community to investigate online rectification systems for autonomous driving post-deployment. Code would be publicly shared." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Autonomous Driving", "3D Object Detection", "Test-time Error Correction" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/85f7cb7993f0642feaaff0c7af8c5c4327e9d3aa.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Test-time Correction with Human Feedback: An Online 3D Detection System via Visual Prompting" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4VfPLTqdrq
Understanding Scale Shift in Domain Generalization for Crowd Localization
main
Active
Crowd Localization;Domain Generalization;Scale Shift
applications to computer vision, audio, language, and other modalities
3;5;5;5;6
5;4;4;4;4
3;2;2;2;3
2;2;2;3;3
3;3;3;3;3
4.8
4.2
2.4
2.4
3
-0.918559
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "I have no further questions beyond those outlined in the weaknesses section." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The identification of scale shift as a specific domain shift challenge in crowd localization, and the introduction of Scale Shift Domain Generalization, bring attention to an under-explored issue with significant real-world implications. ScaleBench provides a standardized benchmark, adding practical value for the research community.\n2. The authors provide a clear theoretical explanation linking scale shift to diversity and correlation shifts, elucidating why DG models struggle with this issue. This rigorous analysis adds depth to the understanding of scale shift and its implications for DG.\n3. The paper is well-organized, with each section following logically from the last. The clear delineation between problem identification, analysis, and solution makes the contributions easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a significant contribution to domain generalization (DG) for crowd localization by addressing the challenge of scale shift, where differences in head size distributions between training and testing data impact model performance. The authors introduce ScaleBench which categorizes datasets based on scale distributions. They also propose Semantic Hook, an algorithm designed to mitigate scale shift by reinforcing the association between semantic features and task predictions. Through testing 20 state-of-the-art DG algorithms on ScaleBench and conducting theoretical analysis, the authors highlight the limitations of current approaches and introduce Scale Shift Domain Generalization as a novel research direction." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The introduction lacks highlighting core contributions and findings.\n2. Although the authors indicate that the paper does not primarily focus on introducing a new method, the experiments in the main text feel somewhat limited. \n3. The appendix contains several formatting issues, particularly with tables. Inconsistencies include varying font sizes, tables floating in the middle of pages, and some tables exceeding the page width. These layout problems affect readability and detract from the paper's presentation quality." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Explain how Semantic Hook handles the scale shift on domain generalization. From the given formulation, the semantic difference will highlight the effect of the perturbation, and the decoder is now learning to map noise to task-specific outcomes. So, how does Semantic Hook reduce generalization risk?\n2. The conditional probability derived in Eq. 6 is incorrect at the first integral. The conditional probability P(y|x) does not equal integrating P(y|z) over the domain of Z. Please provide the correct derivation.\n3. The scale shift is more prevalent in crowd images under perspective projection; however, the scale is more uniform throughout the scene for aerial views. How does the proposed method handle different projections?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper addresses an under-explored issue of scale shift in domain generalization for crowd localization. In terms of contributions, the paper delivers manually annotated bounding boxes for crowd localization on existing public crowd benchmarks. The paper is well-structured and provides a good analysis of the problem, resulting in a novel solution method called Semantic Hook. Further, the authors take an analytical route for the scale shift under domain generalization connecting other attributes present in datasets." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper investigates the impact of scale shift on domain generalization in crowd localization. Models experience performance degradation due to head scale distribution shifts in training and testing datasets. To address this, the authors provide a theoretical analysis of the scale shift under domain generalization and introduce a novel method to mitigate the effect of scale shift, called Semantic Hook. The paper proposes a new benchmark called ScaleBench and provides bounding box annotations for existing public crowd benchmarks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper needs more detailed explanations regarding how Semantic Hook mitigates the scale shift in domain generalization. It also needs to clarify which variables or attributes are being generalized from the perturbation added during training. Additionally, the improvements from the proposed method on the ScaleBench benchmark are marginal compared to the baseline method. Furthermore, the mathematical formulations (Eq. 6) used for the theoretical analysis need to be corrected." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. What does global feature mean in Table 5? Figure 2 shows that semantic features are extracted from the encoder. How to extract global feature?\n2. Is the proposed method sensitive to the choice of gamma?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. A controlled benchmark is established to study scale variance in crowd localization.\n2. This paper proposes SemanticHook to handle scale shift.\n3. Comprehensive analyses are presented to quantify the influence of scale shift." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper aims to study the effect of scale shift in crowd localization. To this end, a benchmark, dubbed ScaleBench, is first established to quantify the influence of scale shift. Next, SemanticHook is proposed to tackle scale shift. The key idea is to enhance the association between semantic features and targets by perturbing the input image with Gaussian noise. Empirical analyses on ScaleBench justify the effect of scale shift." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The rationality of ScaleBench is questionable. First, while the perspective effect often occurs in crowd localization, there exist images that are captured from different angles (e.g., top view). In such scenarios, image distribution regularization may fail to partition the images correctly. Second, in the real world, scale shift is often coupled with other factors, such as occlusion, weather, and appearance. For example, when the object suffers from significant appearance variations, the counting model may fail to localize objects even if training and testing data yield the same scale distribution. Third, dividing images into patches will inevitably result in incomplete objects, which could affect the localization results. Therefore, evaluations on ScaleBench may not rigorously reflect the influence of scale shift. \n2. The proposed SemanticHook does not exhibit superiority over existing methods. As shown in Table 1, the simplest baseline ERM already achieves good results. The proposed method is not necessarily better than ERM.\n3. Following the previous comment, the rationale of SemanticHook is not entirely convincing. Eq. 6 suggests that p(s, c, …) can lead to a spurious association between the output y and scale c. This term is a joint distribution of semantic s and scale c. However, the authors merely try to enhance the semantic association between semantic s and output y. Experimental results demonstrate that such a technique does not address scale shift effectively. Additionally, perturbing image is not a new idea, which is widely used in adversarial attack.\n4. 
It appears that the influence of image interpolation is not rigorously quantified in Table 4. First, the implementation of Random Augmentation shall be modified according to different domains, i.e., the range of random scaling should be customized based on domain Tiny, Small, and Normal. Second, it is necessary to train the model using different source domains to identify the effect of image interpolation. The results on domain Big are insufficient to conclude that the benefits of image interpolation are modest.\n5. Regarding training details. In practice, random scaling is commonly used to alleviate scale variations. As the authors use this technique to train the model, the reported results may not correctly reveal the effect of scale shift, because random scaling already simulates different scales.\n6. The paper lacks evaluations on previous methods featuring multi-scale architecture, e.g., STEER. Evaluations on these methods are helpful in revealing whether previous methods can handle scale variations." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1) How does \"scale shift\" in crowd localization fundamentally differ from other types of domain shifts?\n2) How does Semantic Hook compare with simpler baseline methods, such as multi-scale training or augmentations?\n3) Can the spurious association between scale and the output be quantified?\n4) How would ScaleBench and Semantic Hook perform in real-world crowd localization scenarios with continuous scale distributions?\n5) What are the limitations of ScaleBench in generalizing to diverse crowd analysis tasks?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1) The development of ScaleBench is a major contribution, offering a curated dataset specifically designed to study scale shift effects on domain generalization.\n2) The paper introduces the Semantic Hook as a novel approach to reduce the impact of scale shift in domain generalization tasks.\n3) The paper is well-structured and logically organized. Offering theoretical insights and comprehensive empirical evaluations." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper focuses on the impact of scale variations on crowd localization models' ability to generalize across datasets. To tackle this, the authors introduce ScaleBench, a new benchmark dataset specifically curated to study scale shift, and evaluate 20 existing domain generalization algorithms, showing that many struggle with this type of shift. They also propose an approach, Semantic Hook, aimed at mitigating scale shift by strengthening the association between semantic features and predictions, rather than relying on scale information. 
While the improvements are modest, the paper offers valuable insights into scale-based generalization challenges." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1) The paper frames the issue of scale shift as a new challenge within domain generalization for crowd localization, but this framing seems overstated. Scale shift, where different head sizes (scales) impact model performance across datasets, is not fundamentally new. Previous works have already explored the impact of scale variation on domain adaptation in crowd analysis (albeit under different terminologies), suggesting that this issue is more a subset of a well-studied generalization problem rather than a novel concept. Claiming it as the \"first study\" on \"scale shift domain generalization\" could be seen as an attempt to rebrand existing challenges without sufficient justification.\n\n2) The proposed \"Semantic Hook\" technique to mitigate scale shift claims to enhance the association between semantic features and task predictions, but its practical effectiveness remains questionable. This method involves adding Gaussian noise to \"hook\" relevant features, yet the theoretical rationale behind this approach is underdeveloped. How \"Semantic Hook\" contributes to decoupling scale-related biases from semantic content is unclear. Additionally, the improvement in F1 scores presented in Table 2 is marginal, suggesting that the Semantic Hook might not be a robust solution.\n\n3) While the paper provides a comparison of 20 domain generalization algorithms, there is little discussion about the practical differences in their robustness against scale shifts. The Semantic Hook’s performance is only marginally better than ERM, raising doubts about its practical value. Furthermore, the experiments rely heavily on F1 scores across ScaleBench domains but do not include additional evaluation metrics (e.g., precision, recall) that could provide a fuller picture of model performance under scale shift.\n\n4) ScaleBench, with its scale-based domain partitions, may not accurately reflect real-world applications where scale distributions are more complex and continuous rather than discretely defined. The Leave-One-Out approach used for evaluation also artificially simplifies the generalization challenge. Real-world scenarios often involve more nuanced and diverse shifts between training and deployment environments, suggesting that the paper’s evaluation may lack external validity." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1-What is the difference between semantic hook and other methods that also purturb the image for crowd counting/localization?\n\n2- What other methods specifically in crowd counting and/or localization exist that have addressed the scale variance? 
Have any of these methods been implemented in this paper?\n\n3- Why does each of the previous methods fail to address this issue? What is the authors' insight on this matter?\n\n4- How were the hyperparameters for each model set? Did the authors use grid search?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1- The paper is well-written and engaging, and the ideas flow smoothly. \n\n2- The field of crowd counting/localization would benefit from an analytical work focused on the issue of scale variance, as scale shifts present a significant challenge for model generalization across diverse domains. This paper addresses this gap, and provides both a theoretical framework and a practical benchmark." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper analyzes domain generalization under scale shift in crowd localization, where object scales vary across domains. To address the lack of benchmarks for studying scale-related shifts, the authors introduce ScaleBench. This benchmark divides data into domains based on scale and evaluates models on their ability to generalize to unseen scales. They propose Semantic Hook, a training method that uses noise perturbations to reduce scale reliance and strengthen semantic associations in model predictions." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1- There is limited mention (with a brief explanation of each) of papers specifically in the crowd counting/localization field that tackle the issue of scale variance.\n\n2- While the paper provides a comprehensive benchmark for scale-related domain generalization, it lacks coverage of crowd counting/localization methods specifically designed for domain generalization. How many of the methods in Table 2 discuss scale variance for crowd counting specifically?\n\n3- The paper does not clarify what has been done to prevent overfitting, particularly given the possible complexity of the models relative to the training data provided. \n\n4- Although Tables 6-18 and a brief discussion for each are included in the appendix, the paper lacks an in-depth analysis explaining why certain methods outperform others in specific cases. A discussion of these results would add valuable context to understand the strengths and limitations of each approach under different scale conditions. What could be the reason that each of these methods fails to generalize to a new domain?" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We present a ScaleBench to benchmark scale shift domain generalization, and propose rigorous theoretical analysis for this issue, which further motivates an algorithm called SemanticHook to support the following research on this issue."
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024understanding,\ntitle={Understanding Scale Shift in Domain Generalization for Crowd Localization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4VfPLTqdrq},\nnote={under review}\n}" }, "abstract": { "value": "Crowd localization plays a crucial role in visual scene understanding towards predicting each pedestrian location in a crowd, thus being applicable to various downstream tasks.\nHowever, existing approaches suffer from significant performance degradation due to differences in head scale distributions (scale shift) between training and testing data, a challenge known as domain generalization (DG). This paper aims to comprehend the nature of scale shift within the context of domain generalization for crowd localization models.\nTo this end, we address three key questions: (i) how to quantify the scale shift influence on DG task, (ii) why does this influence occur, (iii) how to mitigate the influence.\nSpecifically, we first establish a benchmark, ScaleBench, and reproduce 20 advanced DG algorithms, to quantify the influence. \nThrough extensive experiments, we demonstrate the limitations of existing algorithms and highlight the under-explored nature of this issue.\nTo further understand its behind reason, we provide a rigorous theoretical analysis on scale shift. \nBuilding on this analysis, we further propose a simple yet effective algorithm called Semantic Hook to mitigate the influence of scale shift on DG, which also serves as a case study revealing three significant insights for future research. Our results emphasize the importance of this novel and applicable research direction, which we term $\\textit{Scale Shift Domain Generalization}$." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Crowd Localization", "Domain Generalization", "Scale Shift" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/8047c3457bc46a3ef69ecd143d0616e779227bcc.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/7f35bf90ff7898c77c950093b61c8bd4c3180831.zip" }, "title": { "value": "Understanding Scale Shift in Domain Generalization for Crowd Localization" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4VmagzA2Tp
Improving Molecule-Language Alignment with Hierarchical Graph Tokenization
main
Active
molecular-language alignment;large language models;hierarchical graph neural networks;tokenization;biomolecular studies;molecule
learning on graphs and other geometries & topologies
3;3;5;5
3;3;4;4
2;2;3;3
2;1;2;2
3;2;3;3
4
3.5
2.5
1.75
2.75
1
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. How many parameters do those two tokenizers have respectively?\n2. What are the ablation study results on other tasks such as property prediction and chemical reaction prediction? \n3. What are the input and output of the molecular property prediction task and other tasks? The performance gain mainly comes from hierarchical graph tokenization, and it has nothing to do with the new tuning dataset, right?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The Hierarchical Graph Tokenization (HIGHT) technique is a major advancement. By incorporating hierarchical structure at multiple levels (node, motif, and graph), the paper addresses a crucial gap in previous molecule-language alignment methods, which typically rely only on node-level information. This hierarchical approach captures the functional groups and structural motifs inherent in molecules, improving the model’s ability to represent complex biochemical properties accurately.\n\n2. The introduction of HiPubChem, an augmented molecular instruction tuning dataset enriched with motif and functional group information, enhances model training by aligning molecular structural details with language descriptions. This contribution is valuable for future work in molecular and biochemical language model alignment.\n\n3. The effectiveness of each of the two methods was verified through simple ablation studies." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a new approach to aligning molecular graph representations with language using a method called Hierarchical Graph Tokenization (HIGHT). Traditional graph-language alignment models primarily focus on node-level information, often neglecting the inherent hierarchical structure of molecules, which leads to alignment issues and hallucination in large language models (LLMs).\n\nThe authors introduce HIGHT, which utilizes a hierarchical graph tokenizer to capture information at the node, motif, and entire molecule levels. This tokenizer incorporates both atom-level and motif-level tokens, which are then used to improve alignment with language models. To address the alignment of hierarchical molecular data with textual descriptions, the authors also develop an enhanced molecular instruction tuning dataset called HiPubChem, which provides detailed motif information." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The introduction of the hierarchical graph tokenizer seems to make the tokenizer larger compared with the ordinary node-level tokenizer. It should be discussed that whether the performance gain comes from the larger tokenizer.\n\n2. 
There should be more detailed descriptions and discussions of the evaluation tasks." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Listed in Cons." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "An improved tokenization of molecular graphs that enriches the molecule's description with motif tokens." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors study Large Graph Language Models (LGLM). Drawing inspiration from Multimodal LLMs, authors focus on the task of incorporating graph data as a separate modality with a GNN encoder and an adapter. Authors conclude that node-centric tokenization of molecules leads to LLM hallucinations when asked about the presence of specific fragments. To overcome this issue, the authors propose to enrich the molecule's description by adding the tokens corresponding to BRICS fragments that are present in the molecule. The experimental results demonstrate that such a tokenization scheme reduces the amount of motif-related hallucinations and improves performance on other tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Specifically, most existing LGLMs directly take the node tokens from GNNs as inputs to LLMs (Cao et al., 2023): \nThe paper cites InstructMol as a previous approach that utilizes node-centric tokenization. However, if I understand correctly, InstructMol takes the embedding of the whole graph along with the SMILES representations of the molecule. Moreover, it is not clear which previous models use the node-centric tokenization and whether there are such models at all.\n\nSection 4.3 describes the fine-tuning approach that involves two stages, where the second stage is the finetuning on the MoleculeNet, ChEBI-20 and Mol-Instructions specialized datasets. In my opinion, this implies that the resulting model is specialized. Please provide a better explanation of specialist and generalist models.\n\nTaking into consideration that the difference between specialist and generalist models is not clear, the resulting model does not demonstrate performance superior to baselines in most of the experiments.\n\nThere is no comparison with [1] in Table 4. The results in [1] are superior to all the models from Table 4.\n\nIn Table 5, the Mol-instruction has the highest MACCS FTS for the retrosynthesis task. However, a smaller number is bolded.\n\nThe comparison on MotifHallu is not complete. Please provide a comparison with SMILES-based approaches. Moreover, the improvement on the MotifHallu benchmark is expected, as the proposed approach was explicitly designed to better solve this task.\n\n[1] Srinivas, S. S., & Runkana, V. (2024). 
Crossing New Frontiers: Knowledge-Augmented Large Language Model Prompting for Zero-Shot Text-Based De Novo Molecule Design. arXiv preprint arXiv:2408.11866." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. At line 273, the authors said \"attach positional encodings $p$ to all of the tokens\". How are the positional encodings of motifs obtained?\n2. If the input graphs use the positional encodings, then should the original positional encodings in the LLMs be disabled, e.g., the widely used RoPE, for the graph input part? \n3. What is the parameter count to be tuned?\n4. Besides the vicuna-v-1.3-7B, can the authors provide experimental results for other LLM backbones? Since different backbones may have a big impact on the performance.\n5. How does the proposed model perform in zero-shot or few-shot scenarios?\n6. In Table 2, LLaMA 13B has worse performance than LLaMA 7B on most of the datasets. Also, Galactica-120B has a sharp performance drop on BACE. Any explanations for these results?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. This paper proposes to incorporate hierarchical graph information into LGLMs, and the authors achieve this with a new architecture and the instruction tuning dataset HiPubChem.\n2. To address the hallucination issue, the paper creates MotifHallu, the first hallucination benchmark based on the existence of common functional groups.\n3. The paper includes extensive experiments with 14 real-world molecular and reaction comprehension benchmarks. The results show that HIGHT significantly reduces the hallucination on MotifHallu and demonstrates significant improvement on a number of tasks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes HIerarchical GrapH Tokenization (HIGHT), which tries to improve how LLMs understand and process molecular data. The key idea of HIGHT is to introduce a hierarchical graph tokenizer that extracts and encodes information at multiple levels: atoms, motifs, and the overall molecule. The paper demonstrates that HIGHT can reduce hallucination and improve performance across various molecular tasks, including property prediction, molecular description generation, and chemical reaction prediction." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The hierarchical graph tokenization process, which involves the use of multiple adapters, is likely to be more computationally expensive than traditional node-centric tokenization. The paper does not discuss the computational complexity. Also, the LLM is tuned using LoRA, and the number of parameters tuned should be discussed.\n2. One motivation for applying LLMs to graph data is to utilize the generalization capability of LLMs. 
However, this paper does not provide experimental results on zero-shot or few-shot scenarios for the proposed model. I think it would greatly strengthen the paper if HIGHT showed good performance in such cases.\n3. The performance of HIGHT will largely depend on the backbone LLMs, and only vicuna-v-1.3-7B is evaluated." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- In Tables 3, 4, and 5, are all baselines also fine-tuned with the same dataset as HIGHT?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The overall presentation is clear and the paper is easy to follow. The work proposes a complete pipeline to build a model with stronger motif/functional group querying ability. Using motif tokens is a straightforward solution to enhance such ability. Various experiments are conducted to validate the model." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a framework named HIGHT to align molecular data with LLMs. It identifies a shortcoming of LLMs in learning functional groups and proposes to extend the graph tokenization to the motif level. Specifically, its input to the LLM includes node/atom embeddings as well as motif embeddings. The model is fine-tuned with motif prediction tasks on a dataset constructed using RDKit. The model shows good performance on molecule property prediction compared to language models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- From the novelty and contribution perspective, taking motif representations/tokens is not new. By simply searching on Google, I found several papers that extract motifs for graph modeling [1, 2] (as the author also mentioned in the paper). This work is a simple extension of these techniques to align the motifs to the LLM.\n\n- If I understand correctly, the motif tokenization algorithm, BRICS, will break the molecule in a very chemistry-aligned way. For example, an \"OH\" functional group will be tokenized into a motif. The downstream task of identifying the functional group will be very easy (simply aligning a single motif token with the text description of the functional group, and the task is like asking \"does a -OH motif have -OH functional group\"). The author should justify how this is a helpful task besides simply telling the LLM that \"there is such a functional group.\" For example, the author should show that the method has better downstream performance than simply telling the LLM the existence of functional groups.\n\n- The distinction between specialist model and generalist model is arbitrary to me. Methods like MolFM and Text+Chem T5-augm-base have the same functionality as the proposal, yet they achieved better performance than HIGHT. 
I think HIGHT is more specialized, as it requires explicit and specialized atom and motif tokenization. Can you be more specific about the distinction, and what's the advantage of a generalist model?\n\n- Even without the motif tokens, many models achieved stronger performance. Can you explain why a better motif prediction ability does not lead to better downstream performance? Linking back to weakness 1, does this also mean that the proposed task is too easy for the motif tokenization, preventing the model from learning meaningful, molecule-property-related information from the pretraining process?\n\n[1] Zhang, Zaixi, et al. \"Motif-based graph self-supervised learning for molecular property prediction.\" Advances in Neural Information Processing Systems 34 (2021): 15870-15882.\n[2] Chen, Xuexin, et al. \"Motif graph neural network.\" IEEE Transactions on Neural Networks and Learning Systems (2023)." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We present a new strategy that incorporates hierarchical graph information into supervised finetuning and instruction datasets for a better alignment of graph and language modalities." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024improving,\ntitle={Improving Molecule-Language Alignment with Hierarchical Graph Tokenization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4VmagzA2Tp},\nnote={under review}\n}" }, "abstract": { "value": "Recently there has been a surge of interest in extending the success of large language models (LLMs) to graph modality, such as molecules. As LLMs are predominantly trained with 1D text data, most existing approaches adopt a graph neural network to represent a molecule as a series of node tokens and feed these tokens to LLMs for molecule-language alignment. Despite achieving some successes, existing approaches have overlooked the hierarchical structures that are inherent in molecules. Specifically, in molecular graphs, the high-order structural information contains rich semantics of molecular functional groups, which encode crucial biochemical functionalities of the molecules. We establish a simple benchmark showing that neglecting the hierarchical information in graph tokenization will lead to subpar molecule-language alignment and severe hallucination in generated outputs. To address this problem, we propose a novel strategy called HIerarchical GrapH Tokenization (HIGHT). HIGHT employs a hierarchical graph tokenizer that extracts and encodes the hierarchy of node, motif, and graph levels of informative tokens to improve the graph perception of LLMs. HIGHT also adopts an augmented molecule-language supervised fine-tuning dataset, enriched with the hierarchical graph information, to further enhance the molecule-language alignment. Extensive experiments on **14** molecule-centric benchmarks confirm the effectiveness of HIGHT in reducing hallucination by **40%**, as well as significant improvements in various molecule-language downstream tasks." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "molecular-language alignment", "large language models", "hierarchical graph neural networks", "tokenization", "biomolecular studies", "molecule" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/11d66ed0808199b68dea9e77505357d3444ff669.pdf" }, "presentation": null, "primary_area": { "value": "learning on graphs and other geometries & topologies" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Improving Molecule-Language Alignment with Hierarchical Graph Tokenization" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4W1wTg7q9o
UrbanWorld: An Urban World Model for 3D City Generation
main
Active
Urban world model;3D city generation
applications to computer vision, audio, language, and other modalities
3;5;5;5
5;5;3;5
2;3;3;2
1;2;2;2
2;3;3;3
4.5
4.5
2.5
1.75
2.75
-0.333333
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See Weakness." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The task of 3D urban generation is important.\n2. The method is reasonable and appears to have better quantitative results than previous models.\n3. The writing is clear and easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a generative urban world model that can automatically create a customized, realistic, and interactive 3D urban world with flexible control conditions. The code for this work has been released." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Claim of World Model. This work belongs to 3D urban generation. It is over-claimed to be a world model and is barely related to AGI. Authors should precisely identify the task and topic. Then, focus on the specific topic and make it comprehensive rather than claiming some large topics.\n\n2. Technical contributions. The motivation of the generation pipeline is unclear. Why do you need a vision language model? What are the special designs in your work that differ from others, and why do you need them? What are the special challenges that led you to design the method? So far, the pipeline looks like a combination of recent advanced techniques, i.e., diffusion models and vision language models.\n\n3. Visual results. The visual results are insufficient. From only a few images, it is not convincing that the visual quality is better than that of other models. Also, Figures 4 and 6 contain some duplicated results.\n\n4. Evaluation of interactive environments. The evaluation of interactive environments is coarse. The navigation tasks are not really evaluated. A single image provides no real evidence. What are the quantitative results, and what are the video results? How do you simulate the physics? What is the physics engine? What is the training speed? What is the model, RL or IL? What are the evaluation metrics?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to weaknesses." 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper is written fluently and is easy to understand.\n2. The proposed method shows relatively better results in generating city scenes with assets that have new appearances.\n3. The authors effectively showcase various capabilities of the pipeline." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a method for 3D urban scene creation called UrbanWorld, which facilitates customized and interactive 3D urban world generation. UrbanWorld uses Blender to create untextured 3D layouts from 2D maps and incorporates Urban MLLM to generate textual descriptions for assets. A diffusion-based method is then applied to generate and refine the geometry of the 3D assets." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. While the authors state that the method achieves “customized, realistic, and interactive 3D urban world generation,” the results appear more simulation-style and fall short of true realism. The texture quality, as seen in Fig. 3 and 4, is not particularly impressive, and there are no significant improvements over CityDreamer.\n2. The absence of video results is notable. For a 3D generation task, video demonstrations would better illustrate the quality and realism of the generated scenes.\n3. Fig. 4 includes scenes with humans and vehicles, but the method of incorporating these assets is unclear. Details on how these elements are introduced and animated within the scene are missing.\n4. Most visual results focus on limited, local areas. For city-level generation, it would be beneficial to include bird’s-eye-view results covering larger spatial regions, similar to CityDreamer.\n5. Including a user study comparison would provide a clearer assessment of the visual quality of the generated scenes.\n6. Although the authors claim the ability to create new assets, this appears limited to the level of appearance, with geometry remaining unchanged from the asset library. Given the importance of geometry in 3D generation, this aspect should be addressed." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. In Figure 3, texture refinement only shows marginal improvement for buildings. Authors should provide more examples, including other objects and views.\n\n2. In Figure 4, the authors show the existence of humans and vehicles; how are these generated? Are they also assets generated at some stage? Or are they added by manual post-processing? 
It is not mentioned anywhere in the paper, and this indicates that the visual quality comparison with other methods is completely unfair.\n\n3. Since the framework generates 3D scenes, I suggest the authors submit videos or at least multiple views of the same scene to demonstrate the quality and view consistency of the generated scenes." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "UrbanWorld introduces a pipeline that integrates generative diffusion models with an urban-specific MLLM to achieve realistic urban scene creation. This combination allows for controlled generation of 3D assets and adaptive urban design." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces UrbanWorld, a generative model designed for the automatic creation of realistic, customizable, and interactive 3D urban environments. UrbanWorld employs a progressive, four-stage generation pipeline: flexible 3D layout creation, Urban Multimodal Large Language Model (Urban MLLM)-based scene design, diffusion-based asset rendering, and MLLM-driven scene refinement." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The authors claimed that Section A is flexible urban layout “generation”. However, this is not like generation methods where the distribution of urban layouts is learned from real-world data [1][2][3]. It seems like the authors are just using OSM’s GT data (AIGC-layout is not explained anywhere in the paper). No detail is given on how the authors transform the OSM data or AIGC data into an untextured 3D urban environment. Are there any generative models or other networks involved? In short, if you are just using GT data and a Blender add-on to import it, you can’t call the process “generation”.\n\n2. In Section 3.2 and Appendix A.2, the authors show that a general urban generation prompt is converted into prompts for different categories of urban objects. However, the same prompt is generated for all objects of the same class. Doesn’t that indicate they would have the exact same style and appearance? For example, if there were 50 buildings in the scene and they all shared the same T2I prompt, they would end up looking the same. Meanwhile, the authors state that descriptions for all categories are generated by an MLLM, but they did not explain where the reference image comes from.\n\n3. For a single asset, the authors generated textures from different views conditioned on the same text and reference image, then merged all textures. This approach cannot guarantee consistency between textures as no 3D condition has been used to guide the 2D diffusion model. Meanwhile, it cannot be called a “3D diffusion renderer”, since the authors only run inference iteratively with pretrained 2D diffusion models. \n\n[1] Infinicity: https://arxiv.org/abs/2301.09637\n[2] CityDreamer: https://arxiv.org/abs/2309.00610\n[3] CityGen: https://arxiv.org/abs/2312.01508" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to the weakness part." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper is well-written and easy to follow. \n2. The code has been released. \n3. The overall framework is technically sound. Based on a pre-defined set of common objects in the urban scenario, the framework bridges the gap between the 3D world and 2D views via pre-trained diffusion models. The pipeline is interesting. \n4. The framework achieves controllable and customizable scene generation, which can support tasks that require agent-environment interactions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces UrbanWorld, a generative model designed to automatically create realistic and interactive 3D urban environments, addressing the challenges of manual labor in urban scene design. UrbanWorld employs a progressive diffusion-based rendering method and a specialized urban multimodal large language model (Urban MLLM) trained on street-view image-text data to guide the generation process. The model consists of four key stages: flexible 3D layout generation, urban scene design using Urban MLLM, controllable asset rendering, and scene refinement. Extensive evaluations demonstrate that UrbanWorld achieves state-of-the-art realism and interactivity, outperforming existing methods like Infinicity and CityGen in generating diverse urban environments suitable for embodied agents." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Even though superior quantitative results are reported, the generated images are not realistic enough based on the demonstration in the paper. \n2. It would be better if the authors could provide more diverse qualitative results generated by the proposed method. The proposed system is claimed to be for 3D city generation. It would be good if a sequence of images/video captured with a moving camera were included to show the scene-level generation capability. \n3. I am confused about the UV unwrapping and UV wrapping parts. How can you ensure the wrapping process can align the texture perfectly to the mesh model? For objects of different types and shapes, I believe this process can be hard for the diffusion model to handle. The UV unwrapping is usually not unique. Is there any mechanism to enforce the equivariance to different unwrapping manners? \n4. I noticed that the Position-aware Texture Completion module is applied to refine the texture map. Can you provide some qualitative results (visualization) to compare the results before and after the refinement?\n5. Section 4.4 is a little vague. How does your generated environment support navigation? How far is the longest distance your navigation can achieve? It could be better to show a bird's-eye view of your navigation environment." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose UrbanWorld, the first generative urban world model that can automatically create customized, realistic and interactive 3D urban environments." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024urbanworld,\ntitle={UrbanWorld: An Urban World Model for 3D City Generation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4W1wTg7q9o},\nnote={under review}\n}" }, "abstract": { "value": "Cities, as the essential environment of human life, encompass diverse physical elements such as buildings, roads and vegetation, which continuously interact with dynamic entities like people and vehicles. Crafting realistic, interactive 3D urban environments is essential for nurturing AGI systems and constructing AI agents capable of perceiving, decision-making, and acting like humans in real-world environments. However, creating high-fidelity 3D urban environments usually entails extensive manual labor from designers, involving intricate detailing and representation of complex urban elements. Therefore, accomplishing this automatically remains a longstanding challenge. Toward this problem, we propose UrbanWorld, the first generative urban world model that can automatically create a customized, realistic and interactive 3D urban world with flexible control conditions. Specifically, we design a progressive diffusion-based rendering method to produce 3D urban assets with high-quality textures. Moreover, we propose a specialized urban multimodal large language model (Urban MLLM) trained on realistic street-view image-text corpus to supervise and guide the generation process. UrbanWorld incorporates four key stages in the generation pipeline: flexible 3D layout generation from OSM data or urban layout with semantic and height maps, urban scene design with Urban MLLM, controllable urban asset rendering via progressive 3D diffusion, and MLLM-assisted scene refinement. We conduct extensive quantitative analysis on five visual metrics, demonstrating that UrbanWorld achieves state-of-the-art generation realism. Next, we provide qualitative results about the controllable generation capabilities of UrbanWorld using both textual and image-based prompts. Lastly, we verify the interactive nature of these environments by showcasing the agent perception and navigation within the created environments. We contribute UrbanWorld as an open-source tool available at https://github.com/Urban-World/UrbanWorld." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Urban world model", "3D city generation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/8748fc45a71987389d0007bd5025742987995b68.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "UrbanWorld: An Urban World Model for 3D City Generation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4WsHgA8EG1
BalancEdit: Dynamically Balancing the Generality-Locality Trade-off in Multi-modal Model Editing
main
Active
Multi-modal learning;Model editing
foundation or frontier models, including LLMs
3;3;5
4;5;4
2;2;2
2;2;2
1;3;3
3.666667
4.333333
2
2
2.333333
-0.5
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "N/A" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1.Why is the accuracy for the Base model in Table 3 not 0? In my humble opinion, the Acc and Loc should be 0 and 100 respectively, similar to the results presented in [1]. \n2. Why were sequential editing experiments conducted on OKVQA instead of MMEdit and OKEDIT, as proposed in this paper?\n3. Previous studies have indicated that the MEND method can produce NaN values [2] during sequential editing; however, this issue does not appear in Table 4. Are there differences in the sequential editing settings between this study and [2]?\n4. IMHO, if the weights in the language module are edited, it is essential to measure text locality and compare it with other methods.\n5. The paper states that black images are used as negative samples across various visual recognition tasks. It would be beneficial to include citations to support this approach.\n6. Some proprietary terms, such as MiniGPT-4 and BLIP-2 OPT, are used inconsistently throughout the text.\n\n[1] Can We Edit Multimodal Large Language Models?\n[2] VLKEB: A Large Vision-Language Model Knowledge Editing Benchmark" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The issues addressed in this paper are of considerable significance. Existing editing methods affect the performance of the edited model on samples related to the edited ones. This paper proposed a new method to adjust the influence radius dynamically. The innovative approach of using positive and negative samples to estimate the influence radius of each knowledge edit is particularly commendable. Additionally, the paper clearly articulates the above issues and presents corresponding solutions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Existing knowledge editing methods often overlook the influence scope of a knowledge edit, leading to limited generality and locality about samples similar to the edited ones. This paper proposes a novel method, BalancEdit, to optimize the trade-off between generality and locality. To assess this trade-off, this paper constructed a new dataset, OKEDIT. Experimental results demonstrate that BalancEdit outperforms existing methods in both single and sequential editing settings." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The proposed method builds upon Grace [1], with the key differences being using positive and negative samples to estimate the influence radius and the fine-tuning of the transformation layer. However, the paper does not include an ablation study to evaluate the contributions of these two modules.\n2. The proposed dataset OKEDIT employs GPT-4 and a diffusion model to generate rephrased images for assessing image generality. 
However, previous studies have noted that generated images may shift in content, leading to inconsistencies with the original images [2]. \n3. The use of the harmonic mean (HM) is questionable, as the presence of a minimum can result in a lower harmonic mean. In Table 3, the FT method applied to BLIP2-OPT shows a performance of less than 1% on locality.\n\n[1] Aging with GRACE: Lifelong Model Editing with Discrete Key-Value Adaptors\n[2] VLKEB: A Large Vision-Language Model Knowledge Editing Benchmark" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "As mentioned in the weaknesses:\n\nDoes the visual reasoning goal in this approach offer substantial differentiation from MMEdit, given the test data's similarity in the form of QA? \n\nCould a more sophisticated method replace black images as negative samples to better define the balance between generality and locality?\n\nHow significant is the impact of using diffusion model-generated images for testing generality and locality, considering their variable quality? Do you verify the image quality by any means (especially human verification) and check whether the generated images can be used for testing?\n\nWould using more recent model architectures, like those in the LLaVA series, yield different results in these experiments?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The motivation is good, particularly the attention to balancing generality and locality in model edits. \nThe approach for setting the influence radius is straightforward, requiring no additional training, which enhances usability. \nAdditionally, the model demonstrates good efficiency in terms of both time and data requirements." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "BalancEdit introduces a new model editing approach, addressing the limitations of traditional model editing techniques. Unlike existing methods, which often ignore the distinct influence of different facts, BalancEdit strikes an optimal balance between generality and locality. By using a codebook of localized edits and generating both positive and negative samples, it accurately assesses each fact's impact without altering the model's core structure. Tested on the newly developed OKEDIT dataset, BalancEdit demonstrates robust editing performance with minimal trade-offs, marking a significant advance in multi-modal model editing." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The method’s visual reasoning goal is limited, offering little differentiation from MMEdit, especially as the test data format remains similar and is based on question answering. 
\n\nUsing a black image as a negative sample is simplistic and may fall short in defining an \"optimal balance between generality and locality.\" Consequently, the hyperparameter alpha is fixed, potentially limiting flexibility.\n\nImages in the generality and locality tests are generated by a diffusion model, which offers limited advancement over MMEdit due to inconsistent image quality.\n\nThe study uses Blip2-OPT and MiniGPT-4 as baseline models, which are somewhat outdated and limited. Architectures like LLaVA and related models may yield different results.\n\nWriting issue:\nThere is a major issue on page 10, lines 489-514, where two paragraphs convey the same information, likely due to an unintentional oversight.\nTypo: Line 723: “labelis”\nLine 862: missing reference\nTable 3: some bolded entries are not the best results\nThe example in figure 4 is confusing, because the first image and the other two differ significantly, and the main subject is two people rather than a church." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "No Ethics Concerns" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "As those mentioned in the weaknesses.\n\nAdditions:\n\nHow many locality test images are there for each editing case? If there is only one image, this can be imbalanced because the generality test has 10 images for each case.\n\nDo you verify the quality of generated images? How do you verify them?\n\nWhy don’t you present the results of the SERAC method?\n\nWriting issue:\n\nTable 3: bold text is misused; some entries are not the best results.\n\nA critical issue exists on lines 489-514, where two paragraphs redundantly convey the same information. This appears to be a significant oversight in the content.\n\nMissing reference in line 862" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The motivation of balancing generality and specificity in model editing is good. \n\nThe method for determining the influence radius is simple and requires no extra training, which improves usability. \n\nMoreover, the model shows good efficiency in terms of both time and data usage.\n\nIntroducing more generality tests for each editing case is beneficial." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "BalancEdit presents a new solution for updating large multi-modal models by achieving a balance between generality and locality in model edits. By introducing the OKEDIT dataset, this approach evaluates and addresses the generality-locality trade-off, a challenge overlooked by other methods. BalancEdit showcases minimal compromise in model performance, offering a robust and efficient approach to knowledge editing without altering the model's core weights." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Using a black or white image as a negative sample is straightforward but may not achieve an optimal balance between generality and locality.\n\nThe editing method involves finetuning a layer, which may be simplistic. Additionally, the experimental results lack comparison with the SERAC method.\n\nAbout image quality, I have some doubts on diffusion generated images, which are used as tests. From fig 4, the second image is totally different from the first image. From fig 6, the first and third examples of generality test are entirely different from the editing sample, making the test results questionable.\n\nThe experiments involve Blip2-OPT and MiniGPT-4. However, considering the fast development of MLLMs, the newer models like LLaVA series, which are widely recognized, should be tested." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024balancedit,\ntitle={BalancEdit: Dynamically Balancing the Generality-Locality Trade-off in Multi-modal Model Editing},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4WsHgA8EG1},\nnote={under review}\n}" }, "abstract": { "value": "Large multi-modal models inevitably decay over time as facts change and previously learned information becomes outdated. Traditional approaches such as fine-tuning are often impractical for updating these models due to their size and complexity. Instead, direct knowledge editing within the models presents a more viable solution. Current model editing techniques, however, typically overlook the unique influence ranges of different facts, leading to compromised model performance in terms of both generality and locality. To address this issue, we introduce the concept of the generality-locality trade-off in multi-modal model editing. We develop a new model editing dataset named OKEDIT, specifically designed to effectively evaluate this trade-off. Building on this foundation, we propose \\textbf{BalancEdit}, a novel method for balanced model editing that dynamically achieves an optimal balance between generality and locality. BalancEdit utilizes a unique mechanism that generates both positive and negative samples for each fact to accurately determine its influence scope and incorporates these insights into the model's latent space using a discrete, localized codebook of edits, without modifying the underlying model weights. To our knowledge, this is the first approach explicitly addressing the generality-locality trade-off in multi-modal model editing. Our comprehensive results confirm the effectiveness of BalancEdit, demonstrating minimal trade-offs while maintaining robust editing capabilities. Our code and dataset will be available." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Multi-modal learning", "Model editing" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/a92fd32526a55eab2c246a388e5aa9ece626237e.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "BalancEdit: Dynamically Balancing the Generality-Locality Trade-off in Multi-modal Model Editing" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4WvCoXU2dF
SymMaP: Improving Computational Efficiency in Linear Solvers through Symbolic Preconditioning
main
Active
Matrix Preconditioning;Symbolic Learning;Linear System Solver
applications to physical sciences (physics, chemistry, biology, etc.)
3;3;5
5;3;5
1;2;3
1;2;2
1;2;3
3.666667
4.333333
2
1.666667
2
0.5
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Some questions are in the previous section on the weaknesses of the submission; other questions are given below.\n\n1. What are alternative approaches to constructing optimal preconditioner parameters? Please add a paragraph to place your work in the context of learning preconditioners from data or similar techniques. For example:\n- https://proceedings.mlr.press/v202/li23e.html \n- https://sc18.supercomputing.org/proceedings/workshops/workshop_files/ws_lasalss102s2-file1.pdf\n- https://arxiv.org/abs/2405.15557 \n- https://arxiv.org/abs/2401.02016\n- https://arxiv.org/abs/1806.06045\n\n2. What are the spectrum properties of the preconditioned matrix with the generated preconditioner? It would be interesting to observe whether they only reduce the condition number or additionally increase spectrum clustering. Condition numbers for the preconditioned matrices are presented in Tables 3 and 6, but only for limited types of PDEs.\n\n3. What was the $\\epsilon$ parameter used in experiments, and does it significantly affect the training runtime/performance?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "Disclaimer: I am not an expert in symbolic regression, so advances in the study from this perspective could not be well-identified. \n\nThe manuscript's main strength is its attempt to apply the general symbolic regression technique to the preconditioner construction problem. The presented pipeline looks non-trivial, although the objective function is standard. In experiments, the presented approach generates preconditioners that establish faster convergence of the linear solver. In addition, the SyMMap could reconstruct the optimal expression for the $\\omega$ in SSOR for positive definite matrices." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The manuscript presents a novel approach to quasi-optimal preconditioner construction based on symbolic regression. The authors perform extensive numerical simulations to identify the optimal parameters for given linear systems through grid search. The pairs (a parameter for a linear system and the corresponding optimal preconditioner parameter) compose the training dataset. Then, the combination of RL trainer and RNN for symbolic regression fits the analytical expression for the optimal preconditioner parameter. Experimental results demonstrate that the presented pipeline gives such a preconditioner that, on average, the runtime for linear solvers is smaller for different PDE classes." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The main weakness of this study is that it misses the crucial step of using the derived symbolic expression to generate the optimal preconditioner. 
I have carefully checked Figure 2 and do not find an explicit connection between the expression $\\tau$ and the compiled preconditioner in the library. Some remarks are given in Section 5.3; however, the presented expressions depend on unclear variables $x_1, x_2, x_3$, so how to use them in preconditioner construction is unclear. \n\nIn addition, I can list the following weaknesses:\n1. The manuscript does not explicitly present the parametrizations of considered PDEs and how these parameters are passed as input to RNN. Also, the authors ignore details on how training and testing sets are prepared.\n2. I did not find the name of the linear solver used to evaluate the preconditioners, e.g., CG, GMRES, or smth else.\n3. The incomplete Cholesky/LU preconditioner is not included in the comparison, although it is among the most powerful.\n4. The authors do not report the runtime for training the presented pipeline (although for Darcy, the runtime is given in Table 6). Moreover, they do not discuss how many linear systems are needed to solve with the generated preconditioner to pay off the training costs compared to classical approaches like SSOR with the optimal parameter or ILU. Tables 1 and 2 show a gain in runtime, which is good, but how much time does the training of symbolic regression require? \n5. For unknown reasons, the authors include a comparison with MLP. However, a more interesting comparison is replacing RNN with Transformer architecture and analyzing the results in the performance of linear solver and training runtime. \n6. No theoretical guarantees on the performance of such an approach or motivation of the presented pipeline are presented, so the robustness of this approach remains unclear." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "See weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The proposed framework involves defining the optimal preconditioning parameters, subsequently searching for symbolic expressions, and integrating these expressions into the modern solver." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors introduce Symbolic Matrix Preconditioning (SymMaP), a method that identifies symbolic expressions for efficient preconditioning parameters. These generated expressions can be seamlessly integrated into modern solvers with minimal computational overhead." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* Although the application of the proposed approach in this context may be novel, the reasoning behind why it could outperform a class of preconditioners based on graph neural networks (https://proceedings.mlr.press/v202/li23e.html, https://arxiv.org/abs/2405.15557) is not evident.\n* The authors test their framework using three datasets (Biharmonic, Darcy Flow, and Elliptic PDE) with minimal variation in matrix size. It would be beneficial to validate their approach using the SuiteSparse Matrix Collection (https://sparse.tamu.edu/).\n* I found some sections a bit challenging to follow. Could the authors consider reorganizing the paper or providing more detailed explanations for each step of SymMaP to enhance clarity?\n* The values in the columns labeled \"SymMap 1\" and \"SymMap 2\" in tables 1, 2, 3 are not clear and would benefit from additional information to clarify the results.\n* A comparison of the performance in a CPU environment using an MLP is not adequate to make a definitive assertion `symbolic expressions possess equivalent expressive capabilities to neural networks in this scenario, effectively approximating the optimal parameter expressions`." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- The authors use the condition number as the evaluation metric for the AMG experiments, which is hard to compute for large matrices. Why not use computation time instead, which is the most straightforward metric? Would the conclusion be different depending on which metric to use? Note that condition number does not necessarily correlate with time, since coarsening generally poses tradeoffs between preconditioning time and convergence speed.\n\n- Continued from the above question: When constructing the training set for AMG, did the authors use time, iteration, or condition number to determine optimal parameters? \n\n- How do the following three times compare: time of generating the training data, time of training the symbolic regression, and time of one preconditioned solve?\n\n- How many data points are needed to train symbolic regression?\n\n- For the interpretability analysis in 5.3, do the expressions come from SymMaP 1 in Tables 1 to 3? Are there trade-offs between the preconditioning performance of a symbolic formula and the interpretability?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper addresses practical problems.\n\n- The symbolic approach has the potential to reveal the relationship between PDE parameters and preconditioner parameters, leading to new mathematical discovery and analysis.\n\n- The symbolic regression performance is competitive with neural network regression." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses the problem of matrix preconditioning, which is a crucial ingredient in the iterative solution of linear systems. In particular, the authors focus on the parameters of some preconditioners and propose a machine-learning approach to determining these parameters. They base their approach on parameterized PDEs, such that it will predict preconditioner parameters given PDE parameters. The authors construct a training dataset and use it to learn symbolic regression formulas for these parameters. They argue that symbolic regression is more efficient and interpretable than other regression techniques." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The empirical evaluation of the proposal is conducted on either too simplistic but less effective preconditioners (SOR and SSOR), or a widely used preconditioner (AMG) with only one single parameter. It would be more informative if the authors experimented with more AMG parameters (such as the choice of the smoother and other coarsening parameters) and conducted a sensitivity analysis.\n\n- The generation of training data is costly: (# data points) * (# grid searches) * (cost of one preconditioned solve)\n\n- The training of symbolic regression can also be costly because of the sample efficiency of reinforcement learning.\n\n- It needs to be clarified of the relationship between genetic programming in section 2.2 and the rest of the paper." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024symmap,\ntitle={SymMaP: Improving Computational Efficiency in Linear Solvers through Symbolic Preconditioning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4WvCoXU2dF},\nnote={under review}\n}" }, "abstract": { "value": "Matrix preconditioning is a crucial modern technique for accelerating the solving of linear systems. \nIts effectiveness heavily depends on the choice of preconditioning parameters. \nTraditional methods often depend on domain expertise to define a set of fixed constants for specific scenarios. \nHowever, the characteristics of each problem instance also affect the selection of optimal parameters, while fixed constants do not account for specific instance characteristics and may lead to performance loss.\nIn this paper, we propose **Sym**bolic **Ma**trix **P**reconditioning (**SymMaP**), a novel framework based on Recurrent Neural Networks (RNNs) for automatically generating symbolic expressions to compute efficient preconditioning parameters. \nOur method begins with a grid search to identify optimal parameters according to task-specific performance metrics. \nSymMaP then performs a risk-seeking search over the high-dimensional discrete space of symbolic expressions, using the best-found expression as the evaluation criterion. \nThe resulting symbolic expressions are seamlessly integrated into modern linear system solvers to improve computational efficiency.\nExperimental results demonstrate that SymMaP consistently outperforms traditional algorithms across various benchmarks. The learned symbolic expressions can be easily embedded into existing specialized solvers with negligible computational overhead. 
Furthermore, the high interpretability of these concise mathematical expressions facilitates deeper understanding and further optimization of matrix preconditioning strategies." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Matrix Preconditioning", "Symbolic Learning", "Linear System Solver" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/1045762bf83eedff7572c541e7db818a4a3046e7.pdf" }, "presentation": null, "primary_area": { "value": "applications to physical sciences (physics, chemistry, biology, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/851136c5b9bf2bb155ad87b89090e31abf5c9e55.zip" }, "title": { "value": "SymMaP: Improving Computational Efficiency in Linear Solvers through Symbolic Preconditioning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4X9RpKH4Ls
Can Transformers Do Enumerative Geometry?
main
Active
AI for Mathematics;Algebraic Geometry;Theorem Discovery;Transformers;Recursive functions;Interpretability Analysis and world model.
applications to physical sciences (physics, chemistry, biology, etc.)
3;3;5;5
2;4;2;3
2;3;3;2
1;2;3;3
2;4;2;2
4
2.75
2.5
2.25
2.5
-0.301511
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "* What is the significance of $\\hbar$ in the quantum Airy structure? How is it relevant specifically to training the DynamicFormer?\n* Figure 3 may be a discrete sampling of an underlying (continuous?) map that gives an $R^2$ for each $A$, with a maximum at $A=2/3$. Can the authors characterize this map?\n* Figure 4 shows a significantly weaker causal impact of $B$ on the number of intersection points, compared to $n$ and $d$. Though the authors call this unexpected in section 5's last paragraph, is there any explanation regarding the weak causal impact of $B$?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "* The new DRA functions, motivated by the evidence presented in the paper, are a significant contribution that may interest machine learning scientists.\n* Training a DynamicFormer to predict $\\psi$-class intersection numbers, which then allows one to investigate a system's deeper geometry, is a significant, novel contribution that will interest mathematicians investigating enumerative geometry.\n* The use of Conformal Prediction to estimate uncertainty provides a concrete measure of confidence in the experimental results, contributing to the paper's soundness.\n* The figures are clear and high-quality, with informative captions.\n* The writing is clear and mostly organized, including the mathematical background, methodology, and results." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes and tests the usage of transformers in the field of enumerative geometry, specifically regarding topological recursions and $\\psi$-class intersection numbers. To accomplish this, the paper proposes a new class of activation functions called Dynamic Range Activators (DRAs), and presents evidence of their performance in predicting a simple recursive function as part of a fully connected neural network, and then their ability to predict $\\psi$-class intersection numbers as part of their DynamicFormer architecture. The paper then attempts to investigate the trained DynamicFormer to see if it can predict other concepts in enumerative geometry, including the Dilation equation that stems from Virasoro constraints, as well as the asymptotic behavior of $\\psi$-class intersection numbers using abductive reasoning, verified using counter-factual intervention." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "### Section 2\n* The equations in this section use $\\hbar$ without defining it in the text. 
It may be worth explicitly calling it the reduced Planck constant in the text.\n* The last paragraph mentions excluding the tensor $C$ due to a decreased impact on the computed $\\psi$-class intersection numbers, observed during experimentation. Appendix C justifies this exclusion, yet is not referenced in the text, making for seemingly unsound reasoning for excluding $C$. The authors should consider referencing appendix C here to further justify the exclusion of $C$, and expanding on this point within appendix C proper.\n\n### Section 3\n* In the first two paragraphs, the paper presents the DynamicFormer for the first time and references a figure placed within an unrelated appendix, resulting in a disjointed reading experience. The authors may consider moving some parts of section 3 (such as its first two paragraphs) into a new appendix showcasing the DynamicFormer in detail and including the figure close by.\n* In the same paragraphs, the authors use the initials COO without previously defining them. These initials seem to appear nowhere else in the main text, and only in appendix B are they defined as Coordinate List. Besides hurting the paper's readability, this seems to be an implementation detail that does not need to appear in the main text.\n* The last paragraph mentions the [DYN] registry tokens, but fails to reference appendix B1. It may be appropriate to reference it here.\n\n### Section 5\n* Equation 5.4 is presented without proof, with the authors claiming they used an approach described in Eynard et al. (2023). A sketch of the proof (perhaps in an appendix) will contribute to the work's soundness.\n* Figure 3, and the relevant experiment, are based on the assumption that $A$ is rational. The authors should consider justifying the choice of testing only rational values of $A$, perhaps by connecting it back to equation 5.3, as proven by Aggarwal (2021).\n* Figure 3 presents a significantly higher value of $R^2$ for $A=2/3$ compared to the values for $A=4/6$ and $A=6/9$, despite being identical numbers. This issue does not appear for other such sets of identical rational numbers, such as $A=3/4$ and $A=6/8$. Since the rest of the subsection on Abductive Reasoning relies on $A=2/3$ being the correct answer, **this error calls the entire subsection into question and significantly hurts the paper's soundness and overall rating**. The authors must justify how the $R^2$ of $A=2/3$ is different from the other two values, or replace the figure (and perhaps rewrite some of the supporting text). Based on the other values of the figure, it should be expected to see a maximal $R^2$ around $A=2/3$, but without such a significant jump.\n\n### Typos\n* Section 5.1 line 319: \"The topological recursion formula equation 2.4 [...]\". Consider removing either \"formula\" or \"equation\", or placing all of \"equation 2.4\" in parentheses.\n* Section 5.1.1. has multiple citations included in sentences with their parentheses. The ICLR 2025 formatting instructions (section 4.1) require such references to not have parentheses except around the year. The references in question appear in lines 371, 378, and 381.\n* Section 5.1.1 line 417: \"As a result, We find an evidence [...]\". \"We\" does not need to be capitalized, and \"an\" should be removed.\n* Appendix C line 950: \"Figure 6 shows (s) numerical [...]\".\n* The title of appendix D and the caption of figure 7 both mistakenly write Princip**le** Component Analysis instead of Princip**al** Component Analysis." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "+ I wonder why the authors chose transformers as the regression function?\n+ In Figure 1, have you tried to apply DRA to MLP or other potential neural networks?\n+ Will the code and the datasets be available?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "+ The idea of using transformers to do enumerative geometry is new.\n + Meanwhile, the authors proposed a new activation function, DRA, which is found to be useful for improving the prediction performance.\n + The authors compared DRA with other popular activation functions in Figure 1 and Table 2.\n\n+ Experiments show some evidence that transformers can learn to predict the $\\psi$-class intersection numbers.\n + Meanwhile, the authors also presented a discussion of how transformers are able to achieve this, by inspecting the internal vector space of the model.\n\n+ The authors also investigated how inputs affect the model’s understanding of $\\psi$-class intersection numbers and the parameters for large genus." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduced DynamicFormer to learn and predict the $\\psi$-class intersection numbers. Experiments include both in-distribution and out-of-distribution results. The authors also presented some experiments to illustrate how transformers perform enumerative geometry. Meanwhile, the authors also investigated whether the proposed method could perform abductive reasoning and hypothesis testing to estimate the parameters of the asymptotic form of the intersection numbers." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I am not an expert in \"enumerative geometry\". However, I think the paper lacks many important clarifications and discussions.\n\n+ The paper lacks a discussion of the reasons/motivations for using transformers. At the moment, the paper seems to be only a combination of a popular neural network architecture and a new mathematical problem.\n+ The \"Related Work\" section is quite weak at the moment: the authors spend only one paragraph discussing related work and then summarize their contributions.\n+ From my perspective, the proposed DRA is not the only way to capture the periodic behavior in data. This lacks sufficient discussion in the paper.\n+ In the experiments of Figure 1, the authors did not apply DRA to other neural network architectures (e.g., MLP), nor did they provide readers with further discussion of this point.\n+ There is a lack of theoretical discussion of the proposed method.\n+ Code is not available."
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "It would be good if the authors could at least mention questions that would bring something interesting to enumerative geometry (something feels interesting when they perform an analysis of the internal representation of the network, but it stops just before it gets interesting...)." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The care and clarity of the writing, the fact that extensive research has been done, and the general trust in the results that this paper inspires." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors investigate the ability of transformers to compute the psi-intersection numbers in geometry and find that they perform (unsurprisingly) very well in distribution and quite well out of distribution, and they perform a series of analyses to understand which structures are being learned this way. They find in particular that the model learns the Dilaton equation and some information about the exponential growth of these psi-intersection numbers.\n\nOverall, this paper is written very carefully, with excellent explanations of what is being done, although the importance of some details is unclear (for example: what do we need to know about psi-intersection numbers? Most of the sophisticated formulae are not really used in a meaningful way). However, I am not sure the work is very interesting from a geometric point of view (the interesting thing is to gain theoretical insight into what psi-intersection numbers are, not to somehow get numerically accurate estimates of them) or from a machine learning point of view (it is not clear what is more interesting about these numbers than about any sequence in the OEIS, say). The experimental results are not particularly surprising given what is known about transformers, or at least I do not see what is surprising about them.\n\nWhile this work can be viewed as a first step towards making progress in applying machine learning to enumerative geometry, and the carefulness of the writing and experiments should be commended, I don't think it brings a lot of interesting new information about machine learning or enumerative geometry." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "What do we learn about machine learning or enumerative geometry? We seem to learn something that could be expected, a particular case of a general phenomenon."
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "*Recommendation*\n\nI recommend rejecting the paper, mainly because I believe ICLR is not a suitable venue, both for refereeing this paper (it needs to be reviewed by at least one expert in enumerative geometry, and I don't know if there are such reviewers at ICLR) and for disseminating it (a journal in the field of computational enumerative geometry may be better suited). Furthermore, in my opinion the presentation of the material can be improved in several aspects before publication.\n\n*Comments and questions*\n\n- To what extent is incorporating the conformal prediction framework into your analysis necessary? I am afraid this adds an additional layer of complexity that further hinders the communication of your findings. Maybe this discussion should be deferred to the appendix, keeping only what is strictly necessary to understand the main conclusion of your experiments in the main paper.\n\n- I don't understand the paragraph at the top of p. 7, and I don't think this is due to my lack of expertise in enumerative geometry. In particular, what does \"the neural network embedding p_g,n ... is a vector space\" mean? How can a function be a vector space? What does \"go to the inner product space\" mean? These are (to me) very loose, nonsensical mathematical statements.\n\n*Minor comments & typos*\n\n- p.3: the acronym COO has not been introduced.\n\n- Figure 5 should be included in the main part of the paper. In general, avoid forward references to material far away, especially in the appendix, without mentioning that it is in the appendix.\n\n- Use capitalization when referencing tables, figures, sections, equations, etc. in the text (no capitalization is needed when referring to figures or tables in general). E.g., Figure in lines 168, 197, 334; Section in line 204; Table in lines 274, 298; Equation in lines 319, 389 ...\n\n- line 196: we -> We\n\n- line 421: the sentence \"The interesting thing is that this is the performance of the non-linear probe.\" could be rephrased to better suit a formal publication." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "(S1) Investigating to what extent the recent successes of transformer models can transfer to other tasks, such as solving fundamental problems in mathematics, is worthwhile and relevant." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Unfortunately, I have no expertise at all in computational enumerative geometry. My review will thus be quite superficial.\n\n*Summary*\n\nThis paper proposes to use transformer models to tackle what I understand to be a central problem in enumerative geometry: computing the psi-class intersection numbers on the moduli space of curves.
From my pretty crude, rudimentary, and pragmatic ML perspective, the authors reduce this problem to learning a multi-modal function mapping input tuples of the form (quantum Airy structure datum [a tensor / sequence of tensors], genus [integer], number of marked points [integer], partitions [permutation-invariant set]) to output intersection numbers [sequence of integers (?)]. The model is trained on solutions computed using brute-force methods up to some genus, and evaluated on its ability to extrapolate to find solutions for higher genera.\n\nThe main technical contributions of the paper are methodological and consist in\n\n(i) designing a specific multi-modal transformer architecture suited to the problem at hand (combining mostly existing models / techniques)\n(ii) introducing a novel activation function specifically suited to modeling recursive functions, which are crucial to solving the problem.\n\nExperiments on synthetic data are provided demonstrating that the model seems to be able to extrapolate to higher genera than the ones seen in the training data. The authors also provide some more qualitative analysis to investigate to what extent the internal representations learned by the model encode mathematical structures that are known to be relevant to solving the problem." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "(W1) The relevance and technical aspects cannot be well understood / evaluated unless the reader has some non-trivial background knowledge of enumerative geometry.\n\n(W2) The writing and exposition of the material can be improved." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024can,\ntitle={Can Transformers Do Enumerative Geometry?},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4X9RpKH4Ls},\nnote={under review}\n}" }, "abstract": { "value": "We introduce a Transformer-based approach to computational enumerative geometry, specifically targeting the computation of $\\psi$-class intersection numbers on the moduli space of curves. Traditional methods for calculating these numbers suffer from factorial computational complexity, making them impractical to use. By reformulating the problem as a continuous optimization task, we compute intersection numbers across a wide value range from $10^{-45}$ to $10^{45}$. To capture the recursive and hierarchical nature inherent in the intersection numbers, we propose the Dynamic Range Activator (DRA), a new activation function that enhances the Transformer's ability to model recursive patterns and handle severe heteroscedasticity. Given precision requirements for computing $\\psi$-class intersections, we quantify the uncertainty of the predictions using Conformal Prediction with a dynamic sliding window adaptive to the partitions of equivalent number of marked points. Beyond simply computing intersection numbers, we explore the enumerative \"world-model\" of Transformers. Our interpretability analysis reveals that the network is implicitly modeling the Virasoro constraints in a purely data-driven manner. Moreover, through abductive hypothesis testing, probing, and causal inference, we uncover evidence of an emergent internal representation of the large-genus asymptotic of $\\psi$-class intersection numbers.
These findings suggest that the network internalizes the parameters of the asymptotic closed-form formula linearly, while capturing the polynomiality phenomenon of $\\psi$-class intersection numbers in a nonlinear manner." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "AI for Mathematics", "Algebraic Geometry", "Theorem Discovery", "Transformers", "Recursive functions", "Interpretability Analysis and world model." ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/809d8cb1b30de7df758a3105d487dbafe7cee587.pdf" }, "presentation": null, "primary_area": { "value": "applications to physical sciences (physics, chemistry, biology, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Can Transformers Do Enumerative Geometry?" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4XHyThqt1C
Alternating Optimized Stochastic Vector Quantization in Neural Compression
main
Active
vector quantization;neural compression;image compression
applications to computer vision, audio, language, and other modalities
3;3;3;5
5;4;4;4
2;2;3;3
2;2;2;2
3;1;2;3
3.5
4.25
2.5
2
2.25
-0.333333
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- For the natural image setting with the proposed method, are the transforms from Balle et al 2018, and entropy model the discrete entropy model from VQVAE? \n- Why can the proposed method not be applied to architectures like NVTC [1] or PQ-VAE [2]? This is not explained, and it seems like the proposed method could be used on these architectures.\n\nReferences:\n\n[1] El-Nouby, Alaaeldin, et al. \"Image compression with product quantized masked image modeling.\" arXiv preprint arXiv:2212.07372 (2022).\n\n[2] Feng, R., Guo, Z., Li, W., & Chen, Z. (2023). NVTC: Nonlinear vector transform coding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 6101-6110)." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The problem of train-test mismatch and other issues of STE in VQ-based models is relevant and timely\n- The proposed method appears principled, and solves some of the challenges that are presented\n- The work is overall well-motivated, and easy to follow" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper investigates an improvement to the STE method used to train VQ-based neural compressors. For scalar quantization methods, the uniform additive noise method during training is shown to yield smooth gradients. This is not applicable to VQ-based methods, which so far mostly use STE. This is shown to yield highly non-smooth gradients. The proposed method, for VQ-based models, uses an alternating optimization scheme, combined with stochastic VQ. This is shown to yield smoother gradients than STE. Experimental results demonstrate superiority over STE-based VQ neural compressors." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- In sections 1-2, the problem is presented well, i.e., the need to solve some issues brought forth by STE in VQ-based compressors. However, section 3 dedicates a lot of explanation to how it is solved in scalar quantized neural compressors, which, to me, appears less important. In 3.2, I think it would be helpful to directly mention the VQ-STE section, as that is the setting which this paper's proposed method attempts to improve on. The UQ-AUN and UQ-STE can be mentioned briefly and details put in the appendix, as the scalar quantization setting is not the focus of the paper. This would provide more space to explain some of the details of the proposed method in section 4, which I found to be lacking. In addition, Figure 6 could be placed in section 4, and the reader can directly contrast that with Figure 4, and see how the non-smoothness issue is fixed via the proposed method. 
\n- The experimental results section covers a broad range of sources, both synthetic and real-world, which is helpful. It is shown that the proposed method outperforms VQ-STE in all settings, and the UQ-AUN method provides a frame of reference. However, some baselines are missing. For example, the two methods soft-toward vector quantization (A2) and probabilistic vector quantization (A3) used in the ablation study (lines 509-511) should also be its own baselines with the Balle et al 2018 transforms. This is useful for understanding how the proposed method compares with other methods that don't use STE. Moreover, these baselines are mentioned in the related work but not compared to. \n- In the related work, lines 138-140, it is said that section 3.2 addresses how prior works in VQ-based neural compression yield sub optimality. However, in the VQ setting, only the STE method from VQVAE is addressed. The method from Agustsson et al, 2017, and Zhu et al 2022 are not addressed in section 3.2. It would be helpful to understand how these two methods' gradients look like in the 1-d Gaussian setting. This, combined with a BD-rate comparison in the results section, would help the reader understand how all the methods compare (conceptually and performance-wise), and strengthen the work overall.\n- Furthermore, the experimental results of the proposed method on natural images use a fairly old architecture (which, to my understanding, uses transforms from Balle et al 2018, single-layer vector quantizer, and a discrete entropy model from VQVAE). There are more recent transforms that are higher-performing, such as those from [1], as well as vector quantizer layers, such as those from [2] and [3]. Experiments using these models would be more convincing. The authors say the proposed method cannot be used on more state-of-the-art models such as these. If true, I think that limits the applicability of the proposed method. \n- There are some issues with the references in the related work, in the second paragraph.\n\nReferences:\n\n[1] Cheng, Zhengxue, et al. \"Learned image compression with discretized gaussian mixture likelihoods and attention modules.\" Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. 2020.\n\n[2] El-Nouby, Alaaeldin, et al. \"Image compression with product quantized masked image modeling.\" arXiv preprint arXiv:2212.07372 (2022).\n\n[3] Feng, R., Guo, Z., Li, W., & Chen, Z. (2023). NVTC: Nonlinear vector transform coding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (pp. 6101-6110)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. What is the single-layer factorized model? Is it the encoder with a single layer, or is it the factorized entropy model with a single layer? The description of the network architecture is not clear in the paper.\n\n2. Please provide more details on the optimization of quantization boundaries. 
When the codebook is fixed, the decoder network and the entropy model are fixed, and the quantization boundaries depend on the codebook centers. How are the boundaries defined? Is it with respect to nearest-neighbor-based partitioning? When the encoder is optimized, the encoder might move the latent into a different partition. Is this what is meant by the optimization of quantization boundaries?\n\n3. The rate of the baseline methods is controlled by adjusting the codebook sizes. Why is the entropy model not used for the baseline methods in the comparison? Even though the baseline methods do not consist of the entropy model, it is better to include the entropy model. The BD-rate gain for the proposed method could also come from the use of the entropy model, in addition to the proposed vector quantization method. The baseline method with the entropy model might also have similar results to the proposed method. If the baseline method also includes the entropy model, it will be easier to quantify the improvement of the proposed vector quantization.\n\n4. In Table 1, for the baseline UQ-AUN (Factorized model Balle et al. (2018b)), is the hyper-prior entropy model used, or is the citation incorrect? In the text, it is written as the factorized entropy model, but it is cited with the hyper-prior entropy model: Johannes Balle, David Minnen, Saurabh Singh, Sung Jin Hwang, and Nick Johnston. Variational image compression with a scale hyperprior. arXiv preprint arXiv:1802.01436, 2018b." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. An alternative optimization procedure to optimize the encoder network, the codebook of vector quantization, and the decoder network. This procedure could result in better convergence of the RD loss function.\n2. An approximation of vector quantization using uniform spherical noise centered on the latent vector.\n3. A gradient analysis of the encoder latent with respect to the loss function.\n4. Deriving the correspondence between vector quantization in the latent space and the corresponding quantization in the image space." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors propose an optimization strategy for vector quantization in neural compression. Since quantization is non-differentiable, they approximate the vector quantization error using noise sampled from a uniform spherical noise distribution. Additionally, they introduce an optimization strategy to effectively minimize the rate-distortion loss function in neural compression. The authors tested their method on simulated data sources and several real-world images, demonstrating that their approach provides better compression efficiency compared to existing vector quantization methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.The paper is not well-written and is incomplete in several sections. In the related work section, citations are missing, and sentences are incomplete, making it difficult to relate the written content to the prior art. Few of the papers in the reference are repeated.\n\n2. The evaluation of the proposed vector quantization is limited. The authors have only experimented with a low-complexity autoencoder using a single layer. 
Consequently, the impact of the proposed method on neural compression is limited. The authors should utilize recent state-of-the-art variational autoencoder-based neural image compression methods, such as [1] and [2], and apply the proposed vector quantization to the latent space of these advanced methods. When the encoder and decoder are more powerful, the impact of vector quantization on reducing the bitrate might be lower than what is shown in the paper.\n [1] Cheng et. al, Learned Image Compression with Discretized Gaussian Mixture Likelihoods and Attention Modules, CVPR 2020\n [2] He et.al, ELIC: Efficient Learned Image Compression with Unevenly Grouped Space-Channel Contextual Adaptive Coding, CVPR 2022.\n\n3. The details of the network architecture are missing from the paper.\n\n4. The alternative optimization strategy is well-established in the vector quantization literature, where the codebook is fixed while optimizing the encoder and decoder. Additionally, in neural compression, some prior works [3] perform fine-tuning of the decoder using the quantized latent \\hat{y}​, showing that optimizing the decoder with the quantized latent improves compression efficiency and reduces the train-test set mismatch. The citations are missing.\n [3] Zongyu Guo et.al, Soft then Hard: Rethinking the Quantization in Neural Image Compression, ICML 2021\n\n5. The citations to the related work (baseline) are incorrect (e.g., in Table 1), making it difficult to review the paper." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. What are the advantages of the proposed encoder-decoder alternating optimization strategy over mixed quantization method?\n\n2. Could the authors theoretically prove that the assumption of $q(\\tilde{y}|y)$ being a uniform sphere distribution centered at $y$ is valid?\n\n3. Could the performance of the proposed model achieve state-of-the-art results?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper is well written and easy to follow.\n\n2. The proposed stochastic vector quantization for encoder optimization approach is superior to the previous VQ+STE approximation method as well as the UQ+AUN method, as demonstrated in experiments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper addresses two main issues of vector quantization (VQ) approximation methods in neural compression. The paper proposes encoder-decoder alternating optimization strategy to address the train-test mismatch and stochastic sphere-noise based approximation technique for suboptimal encoder gradients for rate-distortion (R-D) optimization. 
Experimental results on synthetic sources and natural images demonstrate the effectiveness of the proposed method over previous VQ approximation methods in terms of R-D performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The proposed encoder-decoder alternating optimization strategy is of less importance. Recent neural compression methods address the train-test mismatch issue in end-to-end training by adopting mixed quantization. That is using additive uniform noise for learning the entropy model but employing quantized latent when it is passed to the decoder. There is no evidence that the encoder-decoder alternating optimization strategy is better than the mixed quantization method. Moreover, as the authors illustrated, the proposed alternating optimization strategy is only applicable to single-layer quantization and unconditional entropy models, which leads to obviously degraded R-D performance.\n\n2. In the proposed stochastic vector quantization approach, the authors assume $q(\\tilde{y}|y)$ is a uniform sphere distribution centered at $y$. However, there is no theoretical evidence to support that this assumption is reasonable. \n\n3. In experiments:\n\n(1) For low-dimensional vector sources, it is not reasonable for the dimension of the latent-space vector to be the same as that of the source-space vector, as the primary task of the encoder is dimensionality reduction for feature extraction .\n\n(2) The specific structure of the entropy model of VQ-STE and the proposed method is not given. Due to the different entropy models, it is also unfair to compare the proposed method with UQ-AUN and UQ-STE.\n\n(3) The R-D performance of the proposed method is evidently worse than current state-of-the-art methods. It is even worse than BPG444." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. What is the main advantage of using noise that follows a uniform spherical distribution over conventional additive uniform noise?\n2. In the figures, the encoder transform of UQ-STE (Figure 3) includes the quantizer while that of UQ-AUN (Figure 2) does not. Why?\n3. What's the definition of hypersphere in this paper?\n4. Why the prior is optimized together with the decoder instead of the encoder in the alternate optimization? The distribution of the codeword is determined only by the boundaries, which are determined by the encoder.\n5. How to guarantee $\\Vert\\mathbf{\\mathit{y}}-\\hat{\\mathbf{\\mathit{y}}}\\Vert= \\Vert\\mathbf{\\mathit{y}}-\\mathbf{\\mathit{e}}_i\\Vert=\\Vert\\mathbf{\\mathit{y}}-\\mathbf{\\mathit{e}}_j\\Vert$ for the vector quantizer?\n6. Is the proposed alternating optimization method applicable to other NTC models, including those with uniform quantizers?" 
}, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper is overall well written and easy to follow.\n2. The authors provide a clear framework for analyzing the gradient approximation problem of NTC and propose a method for solving it based on the characteristics of vector quantization." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes an alternating optimization method that incorporates stochastic quantization to improve the quantization process of nonlinear transform coding (NTC). The paper clearly formulates the optimization problem of NTC from the perspective of vector quantization, *i.e.*, the optimization of boundaries and codewords. Experiments on low-dimensional sources and natural images show that the proposed method outperforms the classical NTC method equipped with additive uniform noise and straight-through estimator on image compression." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The motivations and advantages of employing a uniform sphere distribution are hard to understand. The uniform quantizer with additive uniform noise also approximates the encoder gradient with the difference in RD loss between adjacent quantization centers (which is the main advantage of the uniform sphere distribution), as shown in Eq. (4).\n\n By the way, I noticed that the proposed method uses a learnable multidimensional codebook instead of a fixed codebook of uniform quantizers. However, such a gap can be reduced by the nonlinear transforms (for flexible boundaries and codebook in the source space) and conditional coding (for redundant multidimensional signals).\n\n2. The importance of the proposed method seems to be limited. Vector quantization and conditional coding (*e.g.*, spatial auto-regression [R1] and channel-wise auto-regression [R2]) are two kinds of methods that solve the high-dimensional coding problem of latent representations, and the latter one is more prevalent in existing methods. Theoretically, the proposed alternating method can be used in both vector quantization and conditional coding. However, the authors only offer the results for vector quantization. It is better to evaluate the contribution of the proposed method by integrating it with state-of-the-art conditional coding methods, such as ELIC [R3] and TCM [R4].\n\n [R1] D. Minnen, J. Ballé, and G. D. Toderici. Joint autoregressive and hierarchical priors for learned image compression, In *Advances in Neural Information Processing Systems (NeurIPS) 31*, 2018, pp. 10771-10780.\n\n [R2] D. Minnen and S. Singh. Channel-wise autoregressive entropy models for learned image compression. In *2020 IEEE International Conference on Image Processing (ICIP)*, 2020, pp. 3339-3343.\n\n [R3] D. He, *et al.* ELIC: Efficient learned image compression with unevenly grouped space-channel contextual adaptive coding. *Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)*, 2022, pp. 5718-5727.\n\n [R4] J. Liu, H. Sun, and J. Katto. Learned image compression with mixed transformer-cnn architectures. *Proceedings of the IEEE/CVF conference on computer vision and pattern recognition (CVPR)*, 2023, pp. 14388-14397.\n\n3. Contributions on interpreting neural compression as vector quantization should be clarified. 
There has been work (Ballé *et al.*, 2020) that reveals the relationship between the source domain and the latent representation. Although this paper is cited by the authors in their related work, the relationship and contributions of the two papers are not clarified.\n\n4. Several details should be clarified in the manuscript to ensure that the paper is self-contained.\n\n - The implementation of vector quantization in the latent space, which is crucial to better understand the contribution of the proposed method.\n\n - The definition of the uniform sphere distribution.\n\n I note that there are two different definitions of hypersphere, with a difference in whether the points with a distance less than the radius are considered part of the hypersphere. It is suggested that the authors provide a clear definition.\n\n (Additional) Two definitions, with the latter one being the case in this paper:\n\n a) The $(k-1)$-sphere with a radius $R$ is the set of points $[x_1, x_2, \cdots, x_k]$ with $\sum_{i=1}^kx_i^2 = R^2$.\n\n b) The $k$-dimensional hypersphere with a radius $R$ is the set of points $[x_1, x_2, \cdots, x_k]$ with $\sum_{i=1}^kx_i^2\leqslant R^2$.\n\n5. Typos:\n\n - There are several omitted citations in the second paragraph of Section 2.\n\n - There is a redundant comma after “e.g.,” in Line 99.\n\n - The references are not cited with proper commands. Some of the citations need to be replaced by `\citep` instead of `\citet`.\n\n - There is an unnecessary bracket after $\mathbf{\mathit{y}}$ in Line 353." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024alternating,\ntitle={Alternating Optimized Stochastic Vector Quantization in Neural Compression},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4XHyThqt1C},\nnote={under review}\n}" }, "abstract": { "value": "In neural compression, vector quantization (VQ) is usually replaced by a differentiable approximation during training for gradient backpropagation. However, prior approximation methods face two main issues: 1) the train-test mismatch between the differentiable approximation and actual quantization, and 2) the suboptimal encoder gradients for rate-distortion (RD) optimization. In this paper, we first provide new findings about how approximation methods influence the RD optimization in neural compression, and then propose a new solution based on these findings. Specifically, if a neural compressor is regarded as a source-space VQ, we find that the encoder implicitly determines the quantization boundaries, and the decoder determines the quantization centers. Suboptimal approximation methods lead to suboptimal gradients for RD optimization of quantization boundaries and centers. Therefore, to address the first issue, we propose an encoder-decoder alternating optimization strategy. The encoder is optimized with the differentiable approximation, and the decoder is optimized with actual quantization to avoid the train-test mismatch of quantization centers. To address the second issue, we propose a sphere-noise based stochastic approximation method. During encoder optimization, VQ is replaced with uniform sphere noise centered at the input vector. When the input vector is located at the quantization boundary, the encoder gradient is closer to the difference in RD loss between adjacent quantization centers, facilitating better encoder optimization.
We name the combination of optimization strategy and approximation method as Alternating Optimized Stochastic Vector Quantization.\nExperimental results on various vector sources and natural images demonstrate the effectiveness of our method." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "vector quantization", "neural compression", "image compression" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/6eeab1498fa5d49fc75b14861f04868beaa48fff.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Alternating Optimized Stochastic Vector Quantization in Neural Compression" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
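The abstract and reviews in the record above describe the sphere-noise surrogate only in prose. The short NumPy sketch below illustrates one plausible reading of the idea: during encoder updates, replace hard vector quantization with a point sampled uniformly from a ball centred at the latent, and use the actual nearest codeword during decoder updates. The radius choice (distance to the nearest codeword) and all function names are assumptions made for illustration, not the authors' implementation.

```python
# Illustrative sketch only -- not the paper's code.
import numpy as np

rng = np.random.default_rng(0)

def nearest_codeword(y, codebook):
    """Hard VQ: return the codeword closest to y in Euclidean distance."""
    d = np.linalg.norm(codebook - y, axis=1)
    return codebook[np.argmin(d)]

def sphere_noise_surrogate(y, codebook, rng):
    """Replace VQ(y) with a point sampled uniformly from a ball centred at y.
    The radius r = ||y - VQ(y)|| is an assumed choice for this sketch."""
    k = y.shape[0]
    r = np.linalg.norm(y - nearest_codeword(y, codebook))
    direction = rng.normal(size=k)
    direction /= np.linalg.norm(direction)
    radius = r * rng.uniform() ** (1.0 / k)   # uniform density inside a k-ball
    return y + radius * direction

# Toy usage: 4-dimensional latent, 8 codewords.
codebook = rng.normal(size=(8, 4))
y = rng.normal(size=4)
y_tilde = sphere_noise_surrogate(y, codebook, rng)  # stand-in seen during encoder updates
y_hat = nearest_codeword(y, codebook)               # actual quantization seen during decoder updates
```

The encoder-with-surrogate / decoder-with-hard-quantization split follows the abstract's description of the alternating strategy; whether the prior should be grouped with the decoder is exactly what one reviewer question above asks.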
4YpMrGfldX
Scaling Transformers for Low-Bitrate High-Quality Speech Coding
main
Active
Audio coding;neural audio codecs;transformers
generative models
3;5;5;8
4;5;4;5
2;3;3;4
1;3;3;3
3;2;2;3
5.25
4.5
3
2.5
2.5
0.70014
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- How does it compare to the HuBERT-based codec? HuBERT can be as large as the proposed model (its X-Large version), and can turn into a codec with a vocoder attached to it as shown in [a].\n\n[a] A. Polyak et al. “Speech Resynthesis from Discrete Disentangled Self-Supervised Representations,” Interspeech 2021" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The proposed system appears to work well according to the objective metrics and subjective tests.\n- The proposed FSQ idea seems to be a solid quantization option, improving the codebook utilization.\n- The authors put a lot of effort in making it more scalable by adding multiple levels of quantization." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a new Transformer architecture for speech coding. It is characterized by the new scalar quantization method performed in a dimension-reduced space, which showed improved coding gain compared to other methods that are based on residual vector quantization. The paper also provides various training strategies that appear to be useful for neural codec training." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The proposed method relies on the dimension reduction part for its dimension-specific scalar quantization to work. And that's why they could achieve higher codebook utilization. Meanwhile, there is also a trend that higher codebook utilization leads to lower coding gain if entropy coding is applied after tokenization. Indeed, the paper does not mention anything about Huffman coding results, which the proposed method might not be able to take advantage of due to the low dimensionality and high codebook utilization. At the same time, the RVQ-based ones might have a better chance of compressing more via Huffman coding. I wish the paper provided an in-depth discussion about it. In my opinion, all the coding gain and performance-related arguments must be based on the entropy-coded bitrates of all codecs mentioned. \n\n- The other main criticism is that the proposed model is just a lot bigger than the other models. I don't mean to argue that a bigger codec necessarily results in a better coding gain, but in general, it is also true that there is a relation. I wish the paper had provided an ablation test that investigated the impact of the different sizes of the proposed model. \n\n- The paper provides various tips and useful information about their model training, but they are scattered in different places without a clear organization." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "N/A" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "This paper is well written and very easy to follow. In the introduction, it clearly presents the motivations and has an excellent survey of the existing methods. \n\nThough using transformers to scale and leveraging FSQ for high codebook utilization is not new, this paper presents the motivations for these changes, the associated challenges, and their mitigations. This paper also introduces a new method so that FSQ can be used in a similar way to RVQ, where a varying bits-per-second rate can be achieved. \n\nThis paper presents strong experimental results, significantly improving over the existing baselines (but at the cost of increased computation and latency)." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work describes an approach that leverages a scaled transformer architecture to achieve low-bitrate and high-quality speech coding. Different from conventional neural audio codec approaches, this work leverages a finite scalar quantization (FSQ) based bottleneck to achieve a high codebook utilization rate and avoid the difficulty in training a VQ-VAE auto-encoder. Experimental results show this work outperforms existing baselines in both objective and subjective tests." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "If I understand the proposed model correctly, it is based on transformer layers with a local attention window of 128 (both left and right), which means that, unlike DAC/Encodec/Mimi etc., which use causal encoders, the encoder in the proposed method is not causal, and it will introduce a latency of up to the patch length (which is 320/16k ~ 20ms?). It would be great if the authors could present results with a causal encoder so that it can be compared with DAC/Encodec/Mimi in a relatively fair comparison (apart from the model size difference)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed."
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "(please see weaknesses above)" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The idea of using Transformers as the main architecture for neural audio codec learning is novel and well executed.\n- Judging from the audio samples on the demo page and the MOS study, TAAE is clearly state-of-the-art in low-bit-rate speech compression.\n- This paper provides a lot of detailed knowledge, empirical findings, and engineering improvements that can truly benefit the audio codec research community. I personally learned a lot from the details, such as the discussion on the systematic bias of the discriminator, the choice of filterbank, the observation on the latent representation of silence frames with self-attention, etc.\n\n---\n\n### Justification for rating\nOverall, I believe this work is novel enough and provides solid contributions to the field.\nHowever, some improvement might be necessary (see weaknesses below).\nIf the authors can properly address these concerns and update the submission accordingly, I would be more than happy to raise my rating on this paper." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes TAAE, an audio codec model that uses Transformer encoders as the main building block to replace conventional convolution-based modules. To accommodate this choice, TAAE performs downsampling mainly by patchifying the time-domain signal and training transformer encoder stacks on top of the downsampled sequence. For discretizing audio, TAAE relies on an FSQ-based bottleneck that approximates the continuous low-dimensional latent numerically. Experimental results show TAAE achieves outstanding speech quality on autoencoding at a significantly lower bit rate compared to existing models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Given that the main contribution of this work is in exploring an alternative architecture for codec models, completeness in terms of design details and reproducibility is expected. In contrast, I found a lot of details missing or vague. (Although the authors state the code will be released later, the paper itself should still be comprehensive on its own.) Here are some examples:\n\n---\n\n> ($\S$2.1) ... Instead we raise the $\epsilon$ constant used in the calculation of normalization factors in the layer norm blocks ... allows convergence of the architecture.\n\nThis appears to be an interesting observation and a critical hyper-parameter for training the model, as the authors spent a paragraph discussing it, but neither the exact value nor a study/experiment on $\epsilon$ is provided.\n\n---\n\n> ($\S$2.4)...For training the codec itself, we primarily use a normalized feature-matching L1 loss on the per-layer features of the discriminator network ... In addition we found it beneficial to include a traditional L1 reconstruction loss to boost convergence at the beginning of the training process ...\n\nThe overall objective of the model is not explicitly given but described in a more hand-wavy style instead, which could easily lead to misunderstanding.
The full objective should be listed explicitly, together with the weight/importance of each term/component, in the main paper or appendix.\n\n---\n\n> ($\S$2.1) ... The self-attention uses a sliding window of size 128, to restrict receptive field and aid generalization of the architecture to arbitrary length sequences. \n\nThis choice appears as one simple sentence, but self-attention is the key difference between TAAE and prior works, which changes the properties of the model dramatically.\nIf my understanding is correct, this means the receptive field of the first layer is already 2.56 seconds (128 frames $\times$ 20 ms-per-frame), and the number doubles for every layer. It is obvious that TAAE has a much larger receptive field size compared to convolution-based models. While this is an advantage, it could also lead to some problems that are not discussed in the paper.\n \n- What is the trade-off between length generalization and sliding window size for TAAE? How do time complexity and empirical inference time change accordingly? How do these numbers compare to those of CNN-based models?\n- Beyond length generalization, can TAAE perform streaming encoding/decoding (as most of the existing works compared in this paper can)?\n - If so, what is the receptive field size? How does it affect the latency of the codec? How does it compare to conventional CNN-based codec models?\n - If not, this should still be explicitly discussed as a limitation of the proposed framework in the paper.\n\nThese are just some examples. In short, I believe the fundamental differences between TAAE and CNN-based codec models should be discussed in the paper more thoroughly and carefully. Both advantages and disadvantages should be clearly stated and summarized in the main body of the paper.\n\n---\n\nI believe these concerns can all be addressed without additional training, and thus should be easy enough to complete within the rebuttal period." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Why was SpeechTokenizer not included in the perceptual evaluation?\n\nHow do the design goals of Mimi align with those of TAAE? Why is Mimi a good baseline comparison to TAAE?\n\nWhy was a MOS evaluation chosen instead of MUSHRA?\n\nHow do you explain the gap between your perceptual evaluation MOS score and the estimate provided by MOSNet? \n\nWho was included in the perceptual evaluation, and what was their listening setup?\n\nHow does TAAE perform on non-English speech? And how does that compare to the more generalist NAC?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "Originality:\nThe results presented in Appendix B are enlightening regarding the use of non-power-scaled FFT sizes.
The FSQ-based bottleneck also seems to overcome common issues in the training of RVQ systems. \n\nQuality:\nThe authors provide a wide variety of objective assessments for their architecture's performance. The authors also do a good job of citing current literature.\n\nClarity: \nThe authors very clearly describe their architecture and the motivations for their architectural decisions. The appendices were well organized and helpful.\n\nSignificance:\nAppendix B and the FSQ bottleneck are worthwhile contributions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors present an encoder-decoder transformer-based architecture for encoding speech at a very low bitrate, below 1 kbps. The Transformer Audio AutoEncoder (TAAE) uses a polyphase time-frequency representation to perform downsampling/upsampling before the encoder and after the decoder of TAAE. Finite Scalar Quantization (FSQ) is employed within the encoder/decoder bottleneck to mitigate codebook underutilization typically seen with vector quantized (VQ) and residual vector quantized (RVQ) approaches. The authors combine an L1 discriminator feature loss with a decaying L1 waveform loss and a late perceptual reconstruction loss for training. The TAAE is trained on 105k hours of English speech sampled at 16kHz. The reconstruction capability of TAAE is compared to the Descript Audio Codec (DAC), Encodec, SpeechTokenizer, SemantiCodec, and Mimi. A mean opinion score (MOS) is also produced from a perceptual evaluation comprised of 23 participants comparing TAAE to Mimi and SemantiCodec. The authors demonstrate that TAAE obtains better reconstruction performance according to both objective measures and MOS. The authors also demonstrate that one variant of the TAAE codebook attains 98% utilization." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I do not think it is novel to scale the parameter count of a neural network autoencoder and demonstrate better compression ratios/reconstruction compared to smaller architectures. This is a well-known result.\n\nI do not think it is novel to restrict the domain of a neural network autoencoder in comparison to another architecture trained on a more general domain and demonstrate better compression ratios/reconstruction. This is a well-known result.\n\nBy restricting the domain of their speech audio corpus to English speech, the authors have produced an English speech audio codec. In order to claim that this is a \"speech codec,\" the authors should evaluate on non-English speech to demonstrate generalization capabilities. \n\nI do not think DAC, Encodec, and SemantiCodec are reasonable baselines to compare to, as none claim to be English speech codecs. \n\nMimi focuses on streaming and causality with 1/10 the parameter count of TAAE, whereas TAAE makes no claims regarding streaming capability. This also leads to an odd comparison, as the goals of Mimi and TAAE are not aligned. \n\nIt is unclear why SpeechTokenizer was left out of the perceptual evaluation, as it is the most comparable to TAAE in terms of architecture and training domain. Comparison to SpeechTokenizer could also boost claims that FSQ significantly outperforms RVQ schemes. \n\nI think the presented MOS scores are confusing, as MOSNet has estimated the MOS closer to 3 than to 5. A MUSHRA evaluation should have been used instead for pairwise comparison between codecs, as is standard in the literature cited in this paper.
Furthermore, the authors should include demographic breakdowns of the perceptual evaluation, as well as a description of the listening setup, as is standard for speech codecs." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Speech codec using transformers to target extreme compression with good sound quality" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024scaling,\ntitle={Scaling Transformers for Low-Bitrate High-Quality Speech Coding},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4YpMrGfldX},\nnote={under review}\n}" }, "abstract": { "value": "The tokenization of audio with neural audio codec models is a vital part of modern AI pipelines for the generation or understanding of speech, alone or in a multimodal context. Traditionally such tokenization models have concentrated on low parameter-count architectures using only components with strong inductive biases. In this work we show that by applying a transformer architecture with large parameter count to this problem, and applying a flexible Finite Scalar Quantization (FSQ) based bottleneck, it is possible to reach state-of-the-art speech quality at extremely low bit-rates of $400$ or $700$ bits-per-second. The trained models strongly out-perform existing baselines in both objective and subjective tests." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Audio coding", "neural audio codecs", "transformers" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/7a6ea1407e4aff5918e22e466c5b4df083c0877b.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Scaling Transformers for Low-Bitrate High-Quality Speech Coding" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
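Several of the reviews in the record above hinge on how an FSQ bottleneck differs from RVQ (codebook utilization, bitrate, training stability). For reference, here is a minimal, generic finite-scalar-quantization sketch with a straight-through gradient; the level choices are illustrative assumptions, and this is not claimed to match the paper's exact bottleneck or training setup.

```python
# Generic FSQ sketch (assumed details), not the paper's implementation.
import torch

def fsq(z, levels):
    """Per-dimension finite scalar quantization with a straight-through gradient.
    For simplicity this sketch assumes an odd number of levels per dimension;
    even level counts need an extra half-step offset, omitted here."""
    L = torch.tensor(levels, dtype=z.dtype, device=z.device)
    half = (L - 1) / 2
    bounded = torch.tanh(z) * half        # squash each dimension into (-half, half)
    quantized = torch.round(bounded)      # snap to the integer grid
    # Straight-through estimator: forward uses the rounded values,
    # backward uses the smooth `bounded` values.
    return bounded + (quantized - bounded).detach()

z = torch.randn(2, 6, requires_grad=True)
codes = fsq(z, levels=[9, 9, 9, 5, 5, 5])   # implicit codebook size 9*9*9*5*5*5 = 91,125
codes.sum().backward()                      # gradients reach z through the STE
```

Because every grid point is reachable by construction, near-full codebook utilization is easy to obtain, which is the property the reviews contrast with learned VQ/RVQ codebooks; the flip side, raised in the first review, is that uniformly used codes leave less room for further entropy-coding gains.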
4YzVF9isgD
HyperFace: Generating Synthetic Face Recognition Datasets by Exploring Face Embedding Hypersphere
main
Active
Face Recognition;Hypersphere Optimization;Privacy;Synthetic Data
alignment, fairness, safety, privacy, and societal considerations
5;5;5;6
5;5;4;5
4;3;3;3
3;1;2;3
3;2;4;3
5.25
4.75
3.25
2.25
3
0.333333
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. What are the computational resources and time needed to generate larger-scale datasets, e.g. n_id = 30k or more?\n2. It would be interesting to see if we can use the FR model trained on the proposed synthetic dataset to build another good synthetic dataset" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. Well-formulated optimization problem: The paper effectively defines the optimization problem for generating high-quality synthetic face recognition datasets.\n2. Efficient solution: The proposed solution is not only effective but also computationally efficient.\n3. Extensive experiments: The paper presents comprehensive experiments on various synthetic datasets to validate the approach.\n4. Ethical considerations: The authors acknowledge potential ethical concerns, such as identity leakage, demonstrating a responsible approach to AI development." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents an approach to generate a synthetic face dataset for the face recognition problem by formulating it as an optimization problem and solving it iteratively via a gradient descent method to obtain optimized embeddings. The synthesized face images can then be generated from those embeddings using pre-trained face generator models.\nThe experiments show that the models trained with the proposed synthetic datasets can achieve SOTA performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Limited scale: The experiments are restricted to relatively small-scale datasets (up to 30K identities). A more comprehensive evaluation on larger datasets would be desirable.\n2. Narrow focus: The paper's primary contribution is limited to improving inter-class variation. A broader scope, addressing intra-class variability or other relevant aspects, would enhance the impact." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "N/A" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. Are both reference embeddings and gallery embeddings generated from StyleGan? In this case, is the only difference between them that the gallery embeddings are not updated during optimisation?\n2. 
Are all methods in table 1 trained in the same way, using the same face recognition model and training pipeline?\n3. A fair comparison in table 1 should use 50 images per identity for your method\n4. It's important to compare against SOTA (e.g. DCFace) at scale (i.e. increasing the number of identities). Specifically, table 3 should not be just an ablation study but you need to show that your method scales favorably and/or outperforms SOTA as the number of training images increases. \n5. In general, how the method scales has been poorly studied (there's only 1 result in table 3). The Scaling Dataset Generation section discusses computational issues that may arise from scaling the dataset but does not provide concrete numbers (e.g. a figure showing training time vs dataset size), conclusions or practical solutions (i.e. a solution is proposed but not put into practice)\n6. Baselines: what about a direct comparison with arc2face? Since they don't have to solve your optimisation problem, they can generate a lot more images for training" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Interesting idea to perform the optimization in the latent space of a discriminatively trained face recognition model\n- Well written paper, easy to read and understand\n- Decent experiments although lacking in some respects" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Interesting paper that proposes some embedding optimisation in the latent space of a pretrained face recognition model. The optimised embeddings are used for generating facial images using a recently proposed generative model, and the generated images are then used for training a face recognition model. I liked the novelty and simplicity of the proposed approach, yet there are a few issues that possibly limit the impact of the proposed work. See my questions below." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "It could be that the method has significant limitations in terms of scaling the number of images that can be generated. The impact of the work has not been fully demonstrated. See also my questions below." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please see the weaknesses. \n\nIf the authors address the concerns well, I am happy to raise my score." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. This paper is well written and has a solid mathematical formulation." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes an interesting solution for synthetic face recognition, i.e. 
optimizing the embedding hypersphere for generation. The solution is to treat face generation as a packing problem on the embedding space. The optimization goal is to find feature embeddings with high inter-class variation. Finally, this paper adopts a pretrained generation method to generate the dataset." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. [Major] I don't see, from the storytelling, the motivation to convert the face generation problem into a packing problem on the embedding space. Please provide some related empirical/theoretical works regarding why the packing problem could be of use for SFR.\n\n2. [Major] This paper adopts Arc2Face for final image generation. However, (1) The ablation study doesn't show the advantage of directly sending random feature embeddings (each embedding is ensured to be distinct by restricting the similarity to below a certain threshold, e.g. 0.3) to Arc2Face; (2) The comparison with Arc2Face is missing in Table 1; additionally, the result is marginally better than DCFace. The average performance is 89.876, which is similar to DCFace and dramatically lower than Arc2Face. \n\n3. [Major] In Section 'Solving the HyperFace Optimization', the authors choose AdamW as the optimization method. However, alternative optimization methods are not specified or compared in this paper.\n\n4. Another concern is that the proposed method generates more images (640k) to produce performance similar to DCFace (500k).\n\n5. Large intra-class variations cannot be observed in the visualization section.\n\n6. [Minor] Notation is not specified in Fig. 1.2. Please provide more description for the reader to understand the mathematical formulation and the whole generation process. For example, what does the reference embedding stand for? I understood it only when I saw the 'Image Generation' section. And what is X_{g}?\n\n7. Please give some detailed pseudo-code for the entire process (training/generation) for the reader to understand the method."
Experimental results are promising, with satisfactory improvements on benchmark datasets." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a synthetic data generation method for FR that aims to improve inter-class variation compared to existing methods. The approach utilizes the embedding space of a pretrained FR model to create an initial gallery, then optimizes these embeddings to uniformly position identities on a unit hypersphere. A conditional diffusion generator is subsequently used to synthesize faces." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. HyperFace Optimization: Figure 2 shows that HyperFace optimization results in a uniform distribution of points on the hypersphere. However, the paper lacks evidence, analysis, or experiments demonstrating that Equation 1 is minimized by this uniform distribution. See [1] for more details.\n\n2. Use of Hyperspherical Points: One key aspect of the method is the use of uniformly positioned points on the hypersphere. While using a pre-trained FR model for generating the identity gallery is reasonable, it would be helpful to see results without applying HyperFace (using pure FR embedding as condition). How would this affect the results?\n\n3. Experimental Section: The experiments section needs major revisions. Most of the subsections only report table results without in-depth discussion or analysis. For a conference of ICLR’s quality, it’s important to explain specific behaviors and insights from the proposed method.\n\n4. Additional Experiments: Additional experiments could improve clarity on the benefits of the method. For example, while Table 3 presents an ablation study on the number of identities on FR performance, it would also be valuable to show how many novel identities the method can generate (what is the saturation point for the number of novel identities). \n\n5. Identity Leakage: The paper mentions identity leakage but lacks in-depth experiments on the synthesized data. What would the performance look like if synthesized images with high similarity to real datasets (e.g., CASIA) were excluded?" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We formulate the dataset generation as a packing problem on the embedding space (represented on a hypersphere) of a face recognition model and propose a new synthetic dataset generation approach." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024hyperface,\ntitle={HyperFace: Generating Synthetic Face Recognition Datasets by Exploring Face Embedding Hypersphere},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4YzVF9isgD},\nnote={under review}\n}" }, "abstract": { "value": "Face recognition datasets are often collected by crawling Internet and without individuals' consents, raising ethical and privacy concerns. Generating synthetic datasets for training face recognition models has emerged as a promising alternative. However, the generation of synthetic datasets remains challenging as it entails adequate inter-class and intra-class variations. While advances in generative models have made it easier to increase intra-class variations in face datasets (such as pose, illumination, etc.), generating sufficient inter-class variation is still a difficult task. 
In this paper, we formulate the dataset generation as a packing problem on the embedding space (represented on a hypersphere) of a face recognition model and propose a new synthetic dataset generation approach, called HyperFace. We formalize our packing problem as an optimization problem and solve it with a gradient descent-based approach. Then, we use a conditional face generator model to synthesize face images from the optimized embeddings. We use our generated datasets to train face recognition models and evaluate the trained models on several benchmarking real datasets. Our experimental results show that models trained with HyperFace achieve state-of-the-art performance in training face recognition using synthetic datasets." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Face Recognition", "Hypersphere Optimization", "Privacy", "Synthetic Data" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/2f29f02e46319129a97a0e98a45dd4b47a5cf3ef.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "HyperFace: Generating Synthetic Face Recognition Datasets by Exploring Face Embedding Hypersphere" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
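The HyperFace abstract above frames dataset generation as a packing problem on the embedding hypersphere solved by gradient descent (with AdamW, according to one review). The sketch below illustrates that general idea with an assumed log-sum-exp repulsion loss over pairwise cosine similarities; it is not the paper's Equation 1, and it omits the reference/gallery embedding setup the reviewers ask about.

```python
# Assumed packing-style sketch, not the paper's HyperFace optimization.
import torch

n_ids, dim = 512, 128
x = torch.nn.functional.normalize(torch.randn(n_ids, dim), dim=1)
x.requires_grad_(True)
opt = torch.optim.AdamW([x], lr=1e-2)

for step in range(200):
    xn = torch.nn.functional.normalize(x, dim=1)        # embeddings live on the unit hypersphere
    sim = xn @ xn.T                                      # pairwise cosine similarities
    sim = sim - 2.0 * torch.eye(n_ids)                   # push self-similarity out of the way
    loss = torch.logsumexp(sim.flatten() / 0.1, dim=0)   # soft maximum over all pairs
    opt.zero_grad()
    loss.backward()
    opt.step()

embeddings = torch.nn.functional.normalize(x.detach(), dim=1)
# These spread-out identity embeddings would then condition a face generator
# (Arc2Face, per the reviews) to synthesize images for each identity.
```

Minimizing the soft maximum of pairwise similarities drives the identities toward a uniform spread, which is the inter-class-variation goal the abstract states; how well any particular loss matches the paper's objective is one of the open questions raised in the reviews above.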
4ZX2a3OKEV
Solving hidden monotone variational inequalities with surrogate losses
main
Active
Variational Inequality;Optimization;Surrogate;Projected Bellman Error;Min-max Optimization
optimization
3;6;8;8
3;3;4;3
2;3;4;4
2;3;4;3
2;2;3;3
6.25
3.25
3.25
3
2.5
0.493742
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- It is very interesting that better convergence of the inner loop does not necessarily translate to better convergence of the outer loop, i.e. more iterations are not necessarily useful (e.g. LM, Sur-GD, GN, Fig 1 & 2). Is there a theoretical justification? How does that tie in with the $\alpha$-descent rule? If not a low loss value, what makes a \"good\" solution to the inner problem that improves convergence of the outer loop? Has this effect also been observed in the scalar minimization case? If yes, how does it compare?\n- The authors write: \"In general $\ell_t^*$ may not be zero and so this condition cannot be verified directly, however, this condition can often be met via first-order methods for a fixed number of steps or can be approximated with $\ell_t^*=0$.\" (line 171) I am wondering how practical the $\alpha$-descent condition is; is it possible to verify this condition in the presented experiments in section 5?\n\nStyle:\n- Fig 1: It is hard to tell the methods apart; maybe use different line styles." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- The extension of iterative surrogate optimization from the scalar to the variational inequality case is a significant contribution.\n- The paper offers a rigorous theoretical analysis.\n- Strong performance of the method compared to the TD0 baseline.\n- Well-written and relatively easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a new method for optimizing variational inequalities with hidden structure by optimizing a series of surrogate losses, thereby extending previous methods for optimizing scalar loss functions. The authors provide a new $\alpha$-descent condition on the sequence of inner surrogate optimization problems, which is used to derive linear convergence rates for the outer optimization in the deterministic and unconstrained stochastic settings. Specific choices of optimizer for the inner surrogate loss are shown to generalize previous works. Additionally, the authors provide conditions under which linear convergence is achieved.\n\nExperimentally, the method is tested on optimizing min-max games and projected Bellman error. In the min-max setting, different variants of the method are compared, showing that the choice of the inner optimizer matters by improving on the special cases treated in previous work. In the RL setting, the surrogate loss perspective is connected to computationally expensive preconditioning methods, which are shown to be approximated in the linear case via the presented iterative scheme. 
In the non-linear case, the policy evaluation problem is tackled for two MuJoCo environments, where different versions of the method are shown to improve over the special case of TD(0) in terms of wall-clock time and sample efficiency." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The empirical finding of better optimization of the inner problem not leading to better optimization of the outer loop is very interesting but unfortunately not examined in more detail. Both a more in-depth experimental investigation and a theoretical justification for this effect could strongly improve the paper; see also the questions below." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "N/A" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. I think it's very interesting that sometimes more inner loop iterations damage the performance as a whole! (I once did some empirics for boosting weak learners in RL, where I saw a similar thing that I attributed to overfitting...). Do you have any explanations/intuitions for why this could happen (ie is it a general phenomenon in surrogate learning, or an artifact of these small-scale game experiments)?\n\n2. The results on improved sample complexity in the RL value prediction tasks are very compelling in my opinion! I think it fits neatly into an overarching philosophy that blind RL is wasteful (in terms of samples/environment interactions), and that some form of guidance really helps. There are whole fields (search goal-conditioned RL, contrastive RL, etc) that attempt to figure out how to learn the right flavor of guidance, and it seems to me that your form of (not-learned!) surrogate losses can be seen as a particularly simple form of this. From your work and the related literature (which you know 1000x better than I), do you suspect that there can be any theoretical insight/proofs on synthetic settings for how surrogate losses get a provable advantage in terms of number of diverse samples? Certainly one can make claims about decreasing the # of outer loop iterations, but I would be very interested if the extra regularity of your simple surrogate loss trajectory (ie the GD trajectory on $z_t$) can manifest as more efficient exploration?\n\n3. Probably not a valuable question, but I have to ask: would it be possible for you to do GAN training? It would be convincing to deep learning practitioners and theorists alike!\n\n4. This is more a question of personal interest regarding this style of surrogate losses (i.e. 
ones where you take a step in output space, use inner loop to make the model catch up, repeat) and perhaps not specific to VIs, but here goes: *is there any understanding of how this framework assists/counteracts the implicit biases of either (1) optimization methods in the inner loop or (2) architectural choices in the model $g$?* I ask because, particularly in deep learning theory, there is often the vibe that the natural dynamics induced by such design parameters actually can help (see this paper https://arxiv.org/pdf/1802.06509 for a cool result on GD acceleration caused by model depth, for example). I could imagine some settings where the rigidity of the outer loop dynamics on $z_t$ prevent these complicated phenomena (for example, in the linked paper I suspect surrogate losses could prevent the acceleration for adversarial choices of $F$). Conversely, I can certainly imagine settings where the structure of the outer loop drives the optimization more usefully, in a similar fashion to how surrogate rewards in RL help orient an agent in a sparse-reward environment (see Question 2). Is there any understanding of this tradeoff, and perhaps more importantly do you imagine any differences to this tradeoff in the VI setting?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "This paper has a strong originality in that it appears to be the first to extend this type of surrogate losses to VIs, and it does so in a way that provably takes advantage of hidden monotonicity/convexity structure. Importantly, this extension is nontrivial -- there is a simple and solid adversarial example the authors provide (Prop. 3.3) that shows a difficulty gap in comparison with surrogate losses in the scalar optimization case. I think there is the extra strength (though perhaps it isn't highlighted enough, see my \"Weaknesses\" section) that it appears the framework, broad proof techniques, and main takeaways seem rather robust to choice of inner optimization routine (i.e. robust to assumptions, rates of convergence, etc.). In my eyes, the authors have constructed a fairly general methodology to reduce VIs with hidden structure to scalar optimization in a way that directly leverages that hidden structure -- this is very cool, and in general such reduction results tend to be quite powerful (such as online-to-batch reduction, nonconvex-to-convex reduction such as Algorithm 3 in https://arxiv.org/pdf/1806.02958, etc). \n\nThe analysis is also quite clear, proceeding along similar lines to many optimization papers and doing a fantastic job of contextualizing results, definitions, and assumptions in prior works. In particular, Section 4 (before subsection 4.1) skillfully highlights the flexibility of choices of inner loop optimizers in an organized way, noting equivalence to prior methods (such as PHGD) where applicable. This is a good transition to the experiments, which compare different setups of the inner loop routine in various minimax and RL value-prediction environments. Overall, I think this presentation is clear, the assumptions are obvious, and the experimental apparatus seems right for the general framework (though it would have been cool to see a slightly larger-scale experiment, and I think GAN training is the perfect low-hanging fruit if the authors have the resources!). 
Lovely stuff :)" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors introduce a surrogate-loss-based framework for optimization of variational inequality (VI) systems, such as min-max. \n\nThis paper is the first to extend the surrogate methodology to (strongly-monotone) VIs, where the inner loop calls a scalar optimization routine black-box. Importantly, they demonstrate with an elementary adversarial example that surrogate methods in these VIs are qualitatively more complex than surrogate methods in traditional scalar optimization (the inner loop progress needs to be strong enough to counteract the effects of $F$ to ensure outer convergence, as opposed to just $<1$ in scalar case). They show that (under sufficient inner loop optimization assumptions, quantified in the form of the $\\alpha$-descent condition) the overall VI optimization succeeds in deterministic and stochastic regimes, with rates matching what is to be expected of strongly-convex optimization (ie geometric convergence). Lastly, they observe that an existing VI optimization method (PHGD) can be seen as a particular choice of inner loop optimization routine, and they investigate the benefits and consequences of alternative choices of inner loop algorithm and number of steps. \n\nExperimentally, the authors test the surrogate method for VI optimization in previously-investigated small-scale games, as well as on value prediction in fairly substantial RL settings. They observe interesting consequences of certain choices of inner loop algorithm and number of steps, and they demonstrate the value of the surrogate framework. In the RL experiments they see a significant improvement in environment sample complexity due to multiple inner loop iterations -- this matches some related work, but importantly does not require complex learned surrogate reward signals!\n\nTo summarize, the paper introduces the surrogate framework (outer loop over outputs and inner loop over model parameters) to a class of VI problems, demonstrates the nontrivial result that scalar optimization can (under some assumptions) be used black-box as a subroutine, and empirically investigate the associated phenomena on tasks of increasing complexity. Very cool!" }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I think the main (and only, to be honest) weakness of this paper is a weakness of presentation -- in particular, I feel that (as outlined in the \"Strengths\" section of my review above), the main contribution of this paper is that it clarifies and formalizes a framework where black-box scalar non convex optimization guarantees can be bootstrapped up to hidden-structure VI guarantees. However, at many points I felt that the presentation did not highlight this strongly enough, and instead chose to focus on particular rates/modes of convergence and substantiating particular assumptions. \n\nTo be specific, I would argue that the $\\alpha$-descent condition for inner loop progress is a bit of a red herring. As you mention, such convergence can only be shown in specific situations under the usual non convex optimization assumptions (PL condition, for example), which can often be difficult to justify. However, I feel that it's even unnecessary for you to justify it! 
It seems to me that, for example, Lemma A.2 and Prop A.3 would go through (perhaps with significantly more work) for weaker/more exotic types of inner loop guarantee -- the vibe of the proofs is moreso that (strongly-monotone) VIs allow you to push through the classic optimization proofs (the [Sakos '24] paper offers a similar takeaway in terms of bootstrapping convex VI guarantees up to hidden-convex VI guarantees, see their statements on p. 11). I bet there is a way to turn this into a strength of your paper: maybe something more like \"we prove things under the $\\alpha$-descent condition for clarity, but our meta-point is a more general reduction from VI optimization to scalar optimization via surrogate losses\". I am not recommending you to do the analysis under all kinds of crazy inner loop guarantees, but instead to reweight the presentation a bit to highlight the robustness to inner loop method.\n\nI will say that the $\\alpha$-descent setting is a fantastic choice of inner loop guarantee to demonstrate the difficulty gap between scalar vs VI surrogate methods; it makes the presentation of the adversarial example very clear. However, I would have liked to see it used more as a presentation tool/particular instantiation of a more general phenomenon, whereas it often felt like you were keeping it around as a core part of your results. If the impact of this condition was qualitatively different in the VI setting than in scalar surrogate optimization then that would be one thing, but I am unsure of this (note: I am not too familiar with this style of surrogate losses via hidden convexity/monotonicity -- if I am wrong about this perhaps a toy example exemplifying the difference would be cool!). \n\nTo really hammer this point home (sorry, but I don't really see any other weaknesses to write about), I feel like over-indexing on this particular criterion forces you to get stuck in the muck of justifying assumptions such as spectrally bounding input-output Jacobians of neural networks -- to some this may be a losing battle (models on complex, high-dim data will be likely to disregard features dynamically and hopefully learn lower-rank representations), but one I don't think you need to be fighting! Certain hypothesis classes/optimizers/datasets/etc will have different choices of inner loop routine that make sense, and the beauty of what you've shown here is that surrogate methods in VIs appear flexible to these choices. The language of reductions feels much more natural for such a result: I give you a VI problem with hidden monotone structure, you reduce it to a sequence of scalar non convex problems, and the practitioner/domain expert figures out the right inner loop soup (choosing between first-order or second-order methods, bias-variance tradeoffs, etc) to make it work. \n\nTo sum up, it is my opinion that if you are going to use non convex optimization as a black-box for the inner loop, treat it like a black-box (not just in terms of whether to use ADAM as the optimizer, but even in a broader sense). Aside from this (and some questions that I put in the \"Questions\" section), I have nothing else to say but awesome paper!" 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "(1) In the basic case of supervised learning with a scalar loss, can we expect the proposed method perform better than off-the-shelf optimizers that work directly in the parameter space, i.e., Adam?\n\n(2) The condition in the while loop of Algorithm 1 can not be verified. How could we let alpha be the user-defined parameter?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The considered problem is important in the classical optimization context (i.e., constrained optimization, complementarity) and mordern ML where the loss is structured. The problem is also more general than minimizing a scalar loss usually showing up in supervised learning. The experiments show that the proposed method work fine in practice." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes an algorithm using some type of surrogate approximation to solve variational inequality (VI) problems. Those problems can be seen as finding first-order stationary points of a constrained optimization problem, but probably the setting in this paper is more general than that since the vector-valued function F is not necessarily a gradient (e.g. max-min game). The main idea is that \"composite\" (between the model and the loss) optimization problems in machine learning normally exhibit some structure, e.g., the loss w.r.t. to the model's output is convex (but the whole optimization function is not convex w.r.t. the model's parameters), one can push the \"difficulty part\" relating to model into the constraint to make the objective function convex. The authors then design a sequence of surrogate functions to minimize this reformulation problem and show convergence under a condition called \"alpha-descent\". To minimize the surrogate functions, they employ classical methods like Gaussian-Newton or Levenbergh-Marquardt. Numerical experiments are performed for some toy min-max games and in the reinforcement learning context." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper is challenging to follow, particularly in its transition from problem (1) to the construction of the surrogate model, where additional discussion would be beneficial. The assumptions also seem overly restrictive. For instance, while assuming convexity of the loss with respect to the model's output is reasonable for most loss functions, the assumption that the constrained domain is convex feels unnecessarily limiting, even though the authors provide a few narrow examples. Furthermore, the alpha-descent condition (5) requires closer examination, as it appears to be a stringent requirement. 
Specifically, it requires a single constant alpha that holds uniformly across all t." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Questions:\n1. You claim that Sakos et al. (2024) assumes $\mathcal{Z}$ bounded implicitly in their main lemma. Can you clarify which lemma, where it is assumed, and which results are affected? I could not easily find it, and I think this is an important point since it uncovers a fallacy in a previous analysis.\n\nComments:\n- The second condition on $\alpha$ in Theorem 3.2 seems to be always satisfied by setting $p=1$ and $C = \alpha/\eta$. From the proof it appears that there is a hidden dependency between $\eta$, $C$, $p$, and $\alpha$. The statement should either include such dependency or at least clarify this aspect.\n- The use of the term “gradient step” contrasts with the variational inequality formulation, where in general $F$ is not a gradient. A possible alternative could be \"proximal step\".\n- The related work paragraph in the introduction should probably be a subsection, or its name should be removed or changed.\n- The PL condition is not properly defined in the main text. It could be defined close to Line 345 or at least in the appendix.\n\nReferences:\n\nSakos, Iosif, et al. \"Exploiting hidden structures in non-convex games for convergence to Nash equilibrium.\" Advances in Neural Information Processing Systems 36 (2024)." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The theoretical analysis is quite thorough, considering deterministic and stochastic cases and also addressing an issue with a previous analysis. The authors also explain how previous methods fit in their framework and discuss assumptions and potential limitations.\n2. Promising experiments showing that the method can achieve faster convergence with more than one inner-loop iteration, also in practical settings such as reinforcement learning with MLP value functions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the problem of solving variational inequalities with a hidden monotone structure using deep learning methods. The authors present an iterative optimization algorithm with two nested loops where, in the outer loop, a surrogate square loss is constructed and partially minimized in the inner loop (until a sufficient decrease condition called $\alpha$-descent is satisfied) using an optimiser such as a quasi-Newton method or ADAM. When $\alpha$ is sufficiently smaller than $1$, the authors prove linear (in the outer iterations) convergence guarantees in deterministic and stochastic settings, where, in the latter, the algorithm converges to a neighbourhood of the solution. 
They also prove that, when considering general variational inequalities, $\\alpha < 1$ is not sufficient to guarantee convergence. Further, they show how several methods can be seen as special cases of their algorithm. They also present experimental results on min-max optimization and reinforcement learning." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Certain parts lack clarity. Condition 2 in Theorem 3.2 seems unnecessary and needs refinement. (See question and comments).\n2. The paper lacks larger scale experiments. For example Deep RL experiments (with bigger underlying neural networks) could be included to demonstrate the claimed scalability of the method." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "A novel surrogate loss approach to solving variational inequalities with function approximation. Both theoretical guarantees and empirical analysis is provided." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024solving,\ntitle={Solving hidden monotone variational inequalities with surrogate losses},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4ZX2a3OKEV},\nnote={under review}\n}" }, "abstract": { "value": "Deep learning has proven to be effective in a wide variety of loss minimization problems.\nHowever, many applications of interest, like minimizing projected Bellman error and min-max optimization, cannot be modelled as minimizing a scalar loss function but instead correspond to solving a variational inequality (VI) problem.\nThis difference in setting has caused many practical challenges as naive gradient-based approaches from supervised learning tend to diverge and cycle in the VI case.\nIn this work, we propose a principled surrogate-based approach compatible with deep learning to solve VIs.\nWe show that our surrogate-based approach has three main benefits: (1) under assumptions that are realistic in practice (when hidden monotone structure is present, interpolation, and sufficient optimization of the surrogates), it guarantees convergence, (2) it provides a unifying perspective of existing methods, and (3) is amenable to existing deep learning optimizers like ADAM.\nExperimentally, we demonstrate our surrogate-based approach is effective in min-max optimization and minimizing projected Bellman error. Furthermore, in the deep reinforcement learning case, we propose a novel variant of TD(0) which is more compute and sample efficient." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Variational Inequality", "Optimization", "Surrogate", "Projected Bellman Error", "Min-max Optimization" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/3ef1874662ba306443268c5cf5b25eb4200f3070.pdf" }, "presentation": null, "primary_area": { "value": "optimization" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/61e226b49c5676de9ee6a3e290bddcce884801b4.zip" }, "title": { "value": "Solving hidden monotone variational inequalities with surrogate losses" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4ZeOIf2dtC
Looking beyond the surface with Contrastive LEarning with Anti-contrastive Regularization (CLEAR)
main
Active
Weakly Supervised Learning;Disentangled Representation Learning;Variational Autoencoder;Contrastive Learning
unsupervised, self-supervised, semi-supervised, and supervised representation learning
3;3;3;5;5
5;4;4;3;3
2;3;2;3;3
2;1;2;2;2
1;2;2;2;3
3.8
3.8
2.6
1.8
2
-0.872872
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please see the weaknesses section" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The use of Pair Switching (PS) as an anti-contrastive regularization method is new and directly addresses limitations in previous disentanglement approaches by encouraging independent style distributions across content labels.\n\nBy disentangling content and style, CLEAR-VAE enhances classification accuracy on previously unseen content-style combinations, which is valuable in applications sensitive to bias or style variance.\n\nThe framework's potential to mitigate biases in healthcare and other applications underscores its real-world impact, highlighting the model’s relevance to equitable decision support." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces an amazing new framework named Contrastive Learning with Anti-contrastive Regularization (CLEAR-VAE). It's designed to take data representation disentanglement to the next level by separating essential \"content\" features from irrelevant \"style\" attributes. CLEAR-VAE takes the Variational Autoencoder (VAE) model to the next level by integrating a cutting-edge Pair Switching (PS) anti-contrastive regularization. This mechanism is a game-changer! It effectively disentangles content from style representations in a weakly supervised setting by penalizing style features with different content labels. This framework is a game-changer in representation learning. It allows data with similar labels to maintain similar content while style remains independent, enhancing model generalization on unseen data. CLEAR-VAE has been evaluated across image and text datasets, and it has shown incredible potential in enhancing classification performance by effectively managing content and style in latent representations. It has introduced PS regularization, content-style swapping experiments, and a novel metric to quantify disentanglement efficacy." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While the paper mentions the impact of temperature and similarity metrics in SNN loss, it could benefit from a deeper analysis of hyperparameters and their effects on performance, especially in complex datasets.\n\nThe paper could improve its impact by comparing CLEAR-VAE’s performance with other recent disentanglement methods.\n\nSince CLEAR-VAE relies on content labels as the only form of weak supervision, the approach may be limited when label noise or ambiguities exist. This limitation is particularly relevant in real-world applications where ground-truth labels are less reliable." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. In the quantitative experiments, the performance advantage of CLEAR-VAE over the baseline model exhibits differing trends across datasets as the value of $K$ increases(Fig. 6). Specifically, on the styled-MNIST dataset, this advantage diminishes, whereas on the CelebA dataset, it amplifies. Given that both datasets are image-based, it would be beneficial for the authors to provide an explanation for this discrepancy.\n2. In line 233, the phrase *\"we will encourage the representation to be ambiguous about the supervised label\"* raises a question: does this imply minimizing $ I(z^{(s)}; y) $ or maximizing $gMIG(y)$?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. **Problem Significance**: This paper addresses a significant challenge—disentangling content and style representations in VAEs. This approach not only enhances interpretability but also increases the model's robustness against superficial sources of variability, making it better suited for downstream tasks.\n\n2. **Elegance of Anti-Contrastive Regularization**: The design of the anti-contrastive regularization is both elegant and effective. By flipping the labels of positive and negative pairs in $L_{SNN}^{(s)}$, it achieves a similar effect as directly minimizing $-L_{SNN}^{(s)}$ while ensuring that the regularization term remains non-negative. This method simplifies model optimization, eliminating potential issues related to negative terms.\n\n3. **Comprehensive Evaluation**:\n - **Qualitative**: The results of the swapping and interpolation operations are impressive, effectively showcasing the model's disentanglement capabilities (see Fig. 3, Fig. 4, Appx. A.6).\n - **Quantitative**: The setups for evaluating the model's generalizability are well-defined, offering clear insights into performance (e.g., Table 2)." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces CLEAR-VAE, a novel weakly supervised framework designed to learn semantically disentangled latent representations of content and style within VAEs. Unlike traditional methods, CLEAR-VAE leverages contrastive pairs rather than explicit ground truth labels, offering a more flexible approach to disentangling these features. Specifically, content encompasses information critical for downstream tasks, while style includes superficial and irrelevant attributes to those tasks.\n\nCLEAR-VAE achieves disentanglement by extracting group-level content representations through groups organized by ground truth labels and by separating style from content representations without requiring labels for style attributes. 
To accomplish this, the framework enhances the standard $\\beta$-VAE loss by introducing two additional penalties: (1) a contrastive regularization adapted from the SNN loss, which encourages similarity in $z^{(c)}$ (content) representations within the same downstream label, and (2) an anti-contrastive regularization that promotes ambiguity in $z^{(s)}$ (style) regarding the downstream label.\n\nTo evaluate its effectiveness, the paper provides both qualitative and quantitative experiments. The qualitative results confirm the successful disentanglement of content and style representations. At the same time, quantitative comparisons against ML-VAE and baseline models demonstrate CLEAR-VAE’s superior performance and generalizability to unseen combinations of style and content." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. **Core Innovations and Clarity**: The core contributions of this paper, according to the authors, are a weakly supervised framework and an anti-contrastive regularization for style representation.\n - **Weakly Supervised Framework**: For semantic disentanglement of style and content, the authors introduce a weakly supervised framework. However, they omit comparisons with recent similar works, which could lead to misunderstandings that this is the first use of a weakly supervised framework in this context. For instance, other papers have also adopted weakly supervised approaches, such as:\n - *[\"SW-VAE: Weakly Supervised Learning of Disentangled Representations via Latent Factor Swapping\"](https://arxiv.org/abs/2209.10623)*\n - *[\"Weakly Supervised Disentangled Generative Causal Representation Learning\"](https://jmlr.org/papers/v23/21-0080.html)*\n - **Anti-Contrastive Regularization**: The authors state that the benefit of anti-contrastive regularization over direct optimization of $-L_{SNN}^{(s)}$ is that it avoids complicating the minimization of final loss. However, this claim lacks further explanation or experiment validation, which weakens its persuasiveness regarding the ''complication'' mentioned.\n\n2. **Some Mathematical Errors and Confusing Sentences**:\n - The VAE loss in Equation 1 should be the negative ELBO.\n - The two symbols $L_{SNN}^{(s)}$ and $L_{SNN^{(s)}}$ have been used interchangeably, confusing the reader.\n - In Equation 9, *z\\** is supposed to represent the latent variable with the maximum normalized mutual information, but there is no annotation provided.\n - In line 358, it is unclear why contrastive regularization is applied specifically to the EOS token's latent representation, while KL regularization applies to the entire set of contextualized embeddings." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- supervised contrastive learning: Would it make sense to include the paper \"Supervised Contrastive Learning\" by Khosla et al.? 
It is based on a similar idea to combine labels and contrastive learning.\n- are the assumptions realistic? This question is related to the motivation of the paper. How likely is it that we will have access to all the content labels during training? It would be interesting to analyze what happens if it is not the case. What about an experiment where only a subset of content labels is available?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- **Novelty and Contribution**:\n - The authors address the challenge of disentangling style and content without relying on explicit style labels. The proposed PS technique and weakly supervised contrastive framework represent a meaningful advance in disentangled representation learning.\n - The disentanglement method has potential applications in real-world scenarios where spurious correlations in training data (e.g., demographic biases) can affect model generalizability and fairness.\n- **Thorough Experimental Analysis**:\n - The paper presents comprehensive experiments, including swapping and interpolation tasks, to visually illustrate CLEAR-VAE’s disentangling capabilities. Moreover, quantitative measures of generalizability on unseen style-content combinations reinforce the practical benefits of CLEAR-VAE." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces CLEAR-VAE (Contrastive Learning with Anti-contrastive Regularization for Variational Autoencoders), a framework designed to disentangle content and style components in data representations. The authors propose a new Pair Switching (PS) technique to ensure style features remain independent of the content, enhancing model robustness against superficial variations. CLEAR-VAE’s performance is demonstrated across multiple datasets, including Styled-MNIST, CelebA, and Amazon Product Reviews, with both qualitative and quantitative evaluations of the learned disentangled representations." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- **Limitations of Experimental Setup**:\n - The study uses relatively simplified datasets, such as Styled-MNIST and CelebA, for experiments, which may limit the understanding of CLEAR-VAE’s performance on more complex data. Testing the model on more challenging real-world datasets, particularly those with higher-dimensional variations in style (e.g., medical imaging data), would better showcase its generalizability and robustness.\n- **Limited Baselines**: There are limited comparisons to baseline approaches. In [1], more approaches working in the weakly-supervised setting are discussed that could also serve as VAE-baselines, e.g. Group VAE. [2] propose a method that can automatically infer the group size of shared factors during training, which might at least be worth discussing in the related work section. And what about comparisons to other contrastive learning approaches?\n- **Motivation**: I am not sure the motivation for the approach is clear to me. 
Given that the content labels are available during training and the assumptions state that only content labels are necessary for downstream prediction tasks, why are we training a VAE and not a supervised classifier?\n\n\n---\n[1] Locatello et al., \"Weakly-Supervised Disentanglement Without Compromises\", ICML 2020\n[2] Sutter et al., \"Differentiable Random Partition Models\", Neurips 2023" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "How can this method be applied in a healthcare scenario where unwanted style attributes are often partially observed (e.g., acquisition site, demographic attributes such as age and sex are generally available in practice)? Could this approach be adapted into a semi-supervised framework that accounts for partially observed style attributes?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "This paper effectively motivates the subject of disregarding style information within the content latent space by using the example of a medical healthcare application. It soundly presents the theoretical foundations of the concepts it utilizes (ELBO loss and variational auto-encoder, Mutual Information Gap, a classifier-free disentanglement measurement method, etc.). The paper illustrates its results well, both quantitatively (showing generalization in a downstream imbalanced classification scenario and demonstrating that encouraging disentanglement promotes generalization) and qualitatively (with intuitive visual experiments using CelebA and Style MNIST)." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a novel framework called CLEAR (Contrastive Learning with Anti-contrastive Regularization), designed to effectively disentangle content and style in data representations. CLEAR utilizes a single set of content labels (information required to perform a downstream task of interest), while the style source of variability (consisting of attributes irrelevant to the downstream task) is not explicitly required for model training. CLEAR introduces Pair Switching, a Contrastive Learning-inspired regularization loss and a label-switching method that enables the content latent space to learn only the content information. This framework allows for the separation of relevant content features from superficial style attributes, ultimately enhancing the generalizability of models in downstream tasks. Experimental results demonstrate that CLEAR not only facilitates the interpolation and swapping of content and style between samples but also improves classification performance in scenarios with unseen combinations of content and style. 
Overall, the proposed method offers significant advancements in creating robust representations that mitigate the effects of unwanted variability, particularly in sensitive applications like healthcare." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Overall, it seems that Variational Autoencoders are not the latest state-of-the-art models in terms of both 1) generation and 2) representation learning, which dampens the practical use of this method compared to diffusion models and contrastive learning methods. Additionally, the comparison with existing methods appears to be relatively weak, and similar approaches could have been discussed or compared more thoroughly, particularly the Capturing Characteristics VAE (cc-VAE) [A].\n\nFurthermore, the Pair-Switching loss could have been compared with standard Mutual Information minimization losses, such as Total Correlation [B] or kernel Joint Entropy Minimization loss [C].\n\nUltimately, the experimental section seems relatively weak, as it only involves three datasets. Experiments in healthcare applications with real-world impact would have been expected.\n\n[A] Capturing Label Characteristics in VAEs, T. Joy et al. ICLR 2021, https://arxiv.org/abs/2006.10102. \n[B] Abubakar Abid and James Zou. Contrastive Variational Autoencoder Enhances Salient Features, 2019, https://arxiv.org/abs/1902.04601. \n[C] Separating Common from Salient Patterns with Contrastive Representation Learning, R. Louiset et al., ICLR 2024, https://arxiv.org/pdf/2402.11928." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- **Q1.** What is the difference in the text between a group and a class? Are they the same?\n- **Q2.** What is the Jeffrey divergence?\n- **Q3.** Where is the naming \"anti-contrastive\" coming from?\n- **Q4.** Why explain a VAE as only having Gaussian-distributed posteriors/likelihoods? I know it is common practice, but still.\n- **Q5.** How do you decide the dimensionality of the content and style variables? And why did you decide to have an autoencoder (rather than just an encoder) if only the content is relevant for the downstream task?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- **S1** The intuition of the method is quite clear.\n- **S2** The problem of finding disentangled representations (content vs. style) in latent space is of interest to the community.\n- **S3** The qualitative results are quite impressive, and the quantitative ones are clear as well.\n- **S4** The setup for the quantitative experiments is clearly detailed." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work proposes a weakly supervised method to disentangle content and style latent variables in variational autoencoders (VAEs), where content variables are those that have a (non-spurious) correlation with the downstream task to solve, and style variables are the rest of them (which should not affect the downstream task). To this end, the authors propose a combination of three losses: a $\\beta$-ELBO to encourage disentangle representations, a contrastive loss to incentivize content features from the same downstream label to be equal, and another contrastive loss to incentivize the style features from different downstream labels to be equal. The authors demonstrate empirically the efficacy of the proposed method by: qualitatively generating samples that swap/traverse the content and style variables; and quantitatively by comparing the performance of their model with another VAE model (ML-VAE)." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The manuscript has many things to improve in its current state. While I will point some of the most critical ones below, this is no an exhaustive list and I did not dive as deep in other aspects (e.g. experiments) since they would require the list below to be addressed:\n\n- **W1.** The paper's writing contains several typographical errors (L40, L147, L201, L218, L247), incoherent/unnecessary statements (L63,L149-L151), and many unexplained terms (L63, L155, L194, Eq. 9, L215, L253, contrastive learning is given as related work but not explained, etc), and dubious examples (e.g. the one describe for the medical domain is questionable at best, since differences _directly caused_ by the sensitive attributes should **not** be removed from the predictions).\n- **W2.** The maths contain many typos (Eq 1 should negate the loss, Eqs 4,5,6 are missing a closing parenthesis, Eq. 8 writes $c$ instead of $s$, etc.) and questionable statements (L184: you cannot _reasonably_ assume the posterior factorizes, as they are _dependent_ given $x$; L204: what is the norm of two comma-separated terms?; why not using the definitions in Eq. 5 and 6 in the section before?; L236: Why would having a negative value complicate the minimization?; L248: why would the model reach an equilibrium at all?; etc.). This is on top of statements that find little explanations/intuitions (e.g. L219: why would the content get mixed with the style feratures, if they are predictive of the label?)\n- **W3.** The most pressing issue is the literature review (and therefore the baselines in the experiments) which is really weak and misses a lot of relevant works. To name just a few, there are a number of works on identifying content from style variables (for example, [1,2]), which has also been applied to multimodal VAEs [3]. Moreover, contrastive learning has already been applied to successfully learn multimodal representations [3,4]. 
All these works (which form by no means an exhaustive list) are relevant and the authors should discuss and compare with them.\n\n---\n\n[1] [Self-Supervised Learning with Data Augmentations Provably Isolates Content from Style](http://arxiv.org/abs/2106.04619)\n\n[2] [Multi-View Causal Representation Learning with Partial Observability](http://arxiv.org/abs/2311.04056)\n\n[3] [Identifiability Results for Multimodal Contrastive Learning](https://openreview.net/forum?id=U_2kuqoTcB&s=09)\n\n[3] [Relating by Contrasting: A Data-efficient Framework for Multimodal Generative Models](http://arxiv.org/abs/2007.01179)" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a weakly supervised framework based on Contrastive LEarning with Anti-contrastive Regularization (CLEAR) to effectively disentangle and recognize $content$ and $style$ in the latent space." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024looking,\ntitle={Looking beyond the surface with Contrastive {LE}arning with Anti-contrastive Regularization ({CLEAR})},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4ZeOIf2dtC},\nnote={under review}\n}" }, "abstract": { "value": "Learning representations that are robust to superficial sources of variability is important to ensure such variability does not impact downstream tasks. For instance, in healthcare applications, we might like to learn features that are useful for identifying pathology, yet have similar distributions across diverse demographic groups, leading to more accurate and equitable diagnoses regardless of background or surface characteristics. More broadly, this capability can improve the generalizability of our representations by mitigating unwanted effects of variability not seen during training. In this work, we suppose that data representations can be semantically separated into two components: $content$ and $style$. The $content$ consists of information needed for downstream tasks -- for example, it is predictive of the class label in a downstream classification problem -- whereas the $style$ consists of attributes that are superficial in the sense that they are irrelevant to downstream tasks, yet may compromise performance due to associations observed in training data that do not generalize. Here we propose a weakly supervised framework, Contrastive LEarning with Anti-contrastive Regularization (CLEAR), to effectively disentangle $content$ and $style$ in the latent space of a Variational Autoencoder (VAE). Our anti-contrastive penalty, which we call Pair Switching (PS), uses a novel label flipping approach to ensure content is recognized effectively and limited to the $content$ features. We perform experiments to quantitatively and qualitatively evaluate CLEAR-VAE across distinct data modalities. We then analyze the trade-off between disentanglement and ELBO, and the impact of various hyperparameters within our framework. Our results show that using disentangled representations from CLEAR-VAE, we can: (a) swap and interpolate $content$ and $style$ between any pair of samples, and (b) improve downstream classification performance in the presence of previously unseen combinations of $content$ and $style$." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Weakly Supervised Learning", "Disentangled Representation Learning", "Variational Autoencoder", "Contrastive Learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/7e11c1a944aadef0cb78938b2134d44d8709fe37.pdf" }, "presentation": null, "primary_area": { "value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Looking beyond the surface with Contrastive LEarning with Anti-contrastive Regularization (CLEAR)" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4ZhUKd05QM
LGDiffGait: Local and Global Difference Learning for Gait Recognition with Silhouettes
main
Active
Gait Recognition; Movement Difference Modeling; Temporal Modeling
applications to computer vision, audio, language, and other modalities
3;3;5;5;5
5;5;5;3;5
3;2;3;2;2
2;2;2;3;2
3;3;2;1;3
4.2
4.6
2.4
2.2
2.4
-0.408248
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. What are the computational costs associated with the LGDiffGait model?\n\n2. How does the LGDiffGait model handle noisy silhouette data resulting from poor segmentation or alignment processes during preprocessing? \n\n3. Are there ongoing or planned future works to adapt the LGDiffGait framework for use with RGB data? What potential methodologies or modifications are being considered to incorporate color and texture information into the current model?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "+ Well-structured paper with clear explanations of the methods and results.\n\n+ Demonstrates state-of-the-art results on multiple gait recognition datasets, showing improvements over existing methods." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces an approach to gait recognition that leverages both local and global difference learning within video silhouettes to enhance feature extraction. The method uses Local Difference Modules (LDM) and Global Difference Modules (GDM) to capture intricate motion details across both short and long temporal spans, with a Temporal Alignment Module ensuring consistency across the extracted features. The framework significantly outperforms existing methods on multiple benchmarks, demonstrating its robustness and effectiveness in gait recognition across diverse conditions." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper introduces the concept of local and global gait differences without a thorough discussion of the underlying motivations or theoretical foundations compared to traditional spatial-temporal approaches. Insightful exploration into specific scenarios where existing methods fail could substantiate the need for this new approach. A deeper analysis would help clarify why the proposed method better captures unique gait characteristics, potentially through comparative studies or by linking the approach to fundamental biomechanical principles of human motion.\n\n2. Noise in silhouette data could affect difference accuracy. The reliance on pre-processed silhouette data, which is susceptible to noise from segmentation and alignment errors, raises concerns about the integrity of the gait differences captured by the model. This method's effectiveness might be compromised if these preprocessing steps introduce artifacts that are mistaken for intrinsic gait differences. The paper could benefit from a robust discussion on preprocessing techniques' reliability and strategies to mitigate their impact, ensuring that the gait differences reflect true biomechanical motion rather than processing inaccuracies.\n\n3. 
Absence of cross-dataset evaluation limits the demonstrated generalizability of the LGDiffGait model. Including such evaluations would not only validate the model's robustness across varied settings but also highlight its performance stability amidst different capture conditions and demographic variabilities. Insights into how the model performs when trained on one dataset and tested on another could underscore its utility in real-world applications and help identify potential biases or limitations in dataset-specific training.\n\n4. It would be advantageous for the research to examine the model's applicability to RGB data, which remains unexplored and thus limits its use in scenarios where only RGB data is available. It would be valuable to discuss or demonstrate how the model could be adapted for RGB inputs, potentially expanding its practical relevance and adoption. Exploring methodologies to integrate color and texture information available in RGB data could potentially enhance the model’s discriminatory power by leveraging additional cues beyond silhouette shapes.\n\n5. It would be beneficial for the paper to explore the impact of frame step size on the performance of gait recognition. Since the frame interval can significantly influence the detection of subtle gait differences, investigating optimal step sizes for different gait speeds or conditions could yield deeper insights. It would be informative to analyze how varying intervals affect the model’s ability to detect meaningful differences, which would enhance our understanding of the model’s sensitivity and operational flexibility." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "The paper is about gait identification from videos. Only public data have been employed here, but some comments on ethical aspects may be beneficial." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- About existing approaches: the discussion does not mention works based on architectures for sequences (e.g. LSTMs or Transformers). Are these approaches missing? Can you discuss how your approach compares to them, if such works exist in the literature?\n\n- About the need for both local and global temporal representations of motion, existing deep architectures such as SlowFast (https://arxiv.org/abs/1812.03982) have addressed this problem. From what I can gather, it seems to me that the attempt of the authors is different, in the sense that they want to keep the model complexity under control. Nevertheless, a discussion in relation to these already existing approaches would be beneficial to fully appreciate the intentions of the authors and to better contextualize your design choices\n\n- An important influence on the performance of the method comes from the input silhouettes. Comments in this regard are missing\n\n- The reader is a bit lost in the details of the method. Although in some parts they are even redundant (e.g. 
the main architectural operations are described twice, with text and with a formula), in my opinion a clear storytelling of the method is missing. In particular, the flow of the forward propagation is unclear to me. What's the input? A single image, image pairs, the whole sequence? [Further doubts on this part are related to some of my questions below.]\n\n- Related to the first point, I miss the meaning of Fig. 1. Is this intended as an example of the input? Under what circumstances are we facing the different situations? I suggest you provide a more detailed caption clarifying the purpose of the figure.\n\n- In Sec. 3.2.2 the need for the padding is mentioned, but the technical/practical motivations are unclear\n\n- It would be nice to have an intuition on the behavior of the Local Difference Module with an example (an image?)\n\n- When computing the differencing steps, the procedure is reminiscent of a change detection approach. Is this correct?\n\n- The index t appears only in the GDM, so it is not clear to me how the sequence is processed\n\n- The presence of a triplet loss reveals that a specific training strategy is adopted, but this is introduced only in Sec. 3.3 with no appropriate discussion. How is the training organized? I suggest you provide a more detailed explanation of the training procedure (including, for instance, the sampling strategies used for the input pairs) at the point of the paper you find most appropriate.\n\n- The results from all methods are very high in general, with no particular coherence between the different views or any common pattern as the viewing angle changes. Any intuition on the reasons why? Can this give suggestions on the nature of the datasets or the generalization capabilities of the methods? What are the implications for the practical applicability of gait recognition systems?\n\n- A thorough discussion on limitations would be appreciated\n\n- A comment on ethical aspects is needed" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The paper considers an interesting problem, whose relevance goes beyond gait recognition\n- The state-of-the-art is fairly discussed (with few exceptions, see Questions)\n- The results are superior to existing approaches on different public datasets" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents an approach for gait identification called Local and Global Difference Learning for Gait Recognition with Silhouettes (LGDiffGait). The method incorporates local and global gait features in a unique representation. The approach is evaluated on different public datasets and, compared to existing methods, provides superior results." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The presentation of the method and the procedure flow is not fully clear (see Questions). 
I think this is the main weakness of the paper.\n- The limitations of the approach are only briefly touched (the authors mention the computational aspects)" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "In the first row of Table 5, does this indicate that the whole LGDiff blocks have been removed? If that is the case, does the evaluated backbone contain only 3D conv + TP + HPP, yet still achieve such high performance?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "+ Using the difference information along the temporal dimension is reasonable for enhancing gait recognition.\n+ Experimental results show the SOTA performance of the proposed method." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a gait recognition framework named LGDiffGait, which incorporates Local and Global Difference (LGDiff) blocks. The LGDiff block consists of two components: a Local Difference Module (LDM) and a Global Difference Module (GDM). The LDM captures local motions between adjacent frames within a sliding window, while the GDM captures global differences across the whole sequence. A Temporal Alignment Module (TAM) is further used to align the extracted local and global differences with the overall sequence dynamics. Experiments on four gait datasets demonstrate that the proposed method achieves SOTA gait recognition performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The biggest concern is the theoretical novelty of the proposed method. The use of difference features has already been explored in DyGait (Wang et al. 2023b), which is almost the same as the global difference module in this work. The primary distinction lies in the introduction of the local difference module, which shifts the extraction of difference features from the entire sequence—as utilized in the global difference module—to differences across several adjacent frames within a sliding window, which is a minor modification. In addition, the learning of local features has been widely applied in gait recognition, both in spatial and temporal domains, and is not a new concept. Consequently, these factors limit the technical contribution of this paper.\n\n- The proposed framework and the approach to learning difference features are primarily tailored for the specific task of gait recognition, offering limited insights for broader tasks or other areas of representation learning. So, this paper may not be ideally suited for ICLR.\n\n- In the temporal alignment module, it is explained that the difference features are aligned with the overall sequence dynamics. 
However, from my understanding, the temporal order is preserved when extracting difference features, so it is unclear why temporal misalignment of the difference features would occur. In addition, temporal alignment is proposed to be achieved by concatenating the main features with the difference features and further applying a 3D convolution. The rationale behind these operations for achieving temporal alignment also requires further clarification.\n\n- There are a few typos in the paper; for example, in Section 3.2.4, deisned -> designed. Some descriptions of related works may not be entirely accurate. For example, SMPLGait (Zheng et al., 2022) is more accurately described as a fusion of appearance-based and model-based methods rather than purely model-based. The 3D model is used solely to learn view transformation for silhouette features, with recognition relying exclusively on silhouette features." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Optical flow also focuses on the difference, so what is the advantage of using this LGDiff?\nSince the difference could capture the nuances, why does the model need the main convolution branch rather than just using the difference?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper is easy to understand.\n2. The figures are clear and the tables are easy to read.\n3. The proposed method is reasonable in that more hand-crafted features are involved in the feature extraction, leading to the overall performance improvement." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors identified the problem that existing methods mainly focus on extracting features from the entire gait sequence, so they introduced the LGDiff block to capture differences at the local and global levels, with a temporal alignment module to help the model focus on more detailed movement. Based on the experimental results, the performance over four datasets is higher than that of the SOTA methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. How does the alignment module align temporal information? It just combines the main and the difference features, and it is not proper to define it as 'align'.\n\t2. How much is the model size increased? It seems the method introduces a dual network to extract the difference features. Since DeepGaitV2 is already big and LGDiffGait is likely to be an even larger model, it is hard to say whether the improvement is solely from a nice model design or from better features.\n\t3. The authors said the difference is an essential feature to measure the detailed movement. Did you try using the difference only to see how it performs? The idea is similar to using the optical flow to describe the motion.\n 4. 
The improvement in performance does not necessarily mean that the model extracts nuanced gait features; it may be due to overfitting on some non-gait-related objects. It is better to use an attention map or cross-domain evaluation to show the effectiveness.\n 5. There is a lack of analysis about why this design is good and why it works well." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Addressing issues related to novelty, generalizability, efficiency metrics, and broader comparative analysis would further strengthen this paper's impact." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- LGDiffGait employs a dual-level differentiation approach with a Temporal Alignment Module (TAM) that captures both subtle and broad temporal features, ensuring cohesive alignment across sequences.\n\n- Sufficient Comparisons. \n\n- Visualizations using t-SNE and Grad-CAM effectively illustrate the model’s attention to key regions, particularly in capturing dynamic limb movements, which enhances interpretability." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces LGDiffGait, a framework for gait recognition that utilizes Local and Global Difference Modules (LDM and GDM) to capture fine-grained and broad temporal features from silhouettes. A Temporal Alignment Module (TAM) further aligns these features across sequences, resulting in state-of-the-art performance on multiple benchmark datasets." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- **Broader Comparison with Temporal Methods**: Expanding comparisons with other temporal methods would better contextualize LGDiffGait’s specific advantages, situating it within the broader landscape of temporal gait recognition models.\n\n- **Validation of Temporal Method Generalizability**: To fully assess the generalizability of the temporal methods (LDM, GDM, and TAM), applying these modules to various baseline models (e.g., GaitBase and DeepGaitV2) would provide a clearer demonstration of their adaptability and effectiveness across different architectures.\n\n- **Lack of Efficiency Metrics**: The absence of parameter and FLOP metrics limits understanding of the model’s computational demands, which would be valuable for assessing its scalability and efficiency.\n\n- **Poor Novelty**: The community may find it hard to get new ideas from the manuscript. The local and global shifted (diff) temporal modeling has been discussed many times in previous works [1, 2, 3]. The authors have made much effort on this topic but still have not achieved impressive enough performance improvements across all the employed datasets. 
\n\n[1] Lin et al, GaitGL at ICCV2021\n[2] Lin et al, MT3D at MM2020\n[3] Zheng et al, MSTGait at MM2023" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "LGDiffGait captures local and global movement differences in gait sequences for more accurate gait recognition." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024lgdiffgait,\ntitle={{LGD}iffGait: Local and Global Difference Learning for Gait Recognition with Silhouettes},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4ZhUKd05QM},\nnote={under review}\n}" }, "abstract": { "value": "The subtle differences between consecutive frames of a gait video sequence are crucial for accurate gait identification, as they reflect the distinctive movement of various body parts during an individual’s walk. However, most existing methods often focus on capturing spatial-temporal features of entire gait sequences only, which results in the neglect of these nuances. To address the limitation, in this paper, we propose a new approach, named Local and Global Difference Learning for Gait Recognition with Silhouettes (LGDiffGait). Specifically, the differences within gait sequences are explicitly modeled at two levels: local window-level and global sequence-level. For the local window-level, we apply sliding windows along the temporal dimension to aggregate the window-level information, and the local movement is defined as the difference between pooled features of adjacent frames within each window. For the global sequence-level, global pooling across the entire sequence is employed, which is followed by subtraction to capture overall movement differences. Moreover, after difference feature learning, we develop a temporal alignment module to align these extracted local and global differences with the overall sequence dynamics, ensuring temporal consistency. By explicitly modeling these differences, LGDiffGait can capture the subtle movements of different body parts, enabling the extraction of more discriminative features. Our experimental results demonstrate that LGDiffGait achieves state-of-the-art performance on four publicly available datasets." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Gait Recognition; Movement Difference Modeling; Temporal Modeling" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/b2e3341f0fa326e90c52c6cf35d1bb4e2918256a.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. 
If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "LGDiffGait: Local and Global Difference Learning for Gait Recognition with Silhouettes" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
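As a point of reference for the LGDiffGait record above, here is a minimal sketch of the local and global difference computation its abstract describes (sliding-window pooling followed by adjacent-frame differences, and sequence-level pooling followed by subtraction). The tensor layout, window size, and pooling choices below are illustrative assumptions, not details taken from the paper.

```python
# Illustrative sketch only: local/global temporal differences over silhouette features.
import torch
import torch.nn.functional as F

def local_difference(feats: torch.Tensor, window: int = 3) -> torch.Tensor:
    """feats: (N, C, T, H, W) feature maps; window is assumed odd."""
    # Window-level aggregation via average pooling along the temporal axis.
    pooled = F.avg_pool3d(feats, kernel_size=(window, 1, 1), stride=1,
                          padding=(window // 2, 0, 0))           # (N, C, T, H, W)
    # Local movement: difference between pooled features of adjacent frames.
    diff = pooled[:, :, 1:] - pooled[:, :, :-1]                  # (N, C, T-1, H, W)
    return F.pad(diff, (0, 0, 0, 0, 1, 0))                       # pad back to T frames

def global_difference(feats: torch.Tensor) -> torch.Tensor:
    """Global pooling across the whole sequence, followed by subtraction."""
    global_ctx = feats.mean(dim=2, keepdim=True)                 # (N, C, 1, H, W)
    return feats - global_ctx                                    # per-frame deviation
```

In this reading, the local branch reacts to frame-to-frame motion inside a short window, while the global branch measures how each frame deviates from the sequence-wide average; a temporal alignment step, as the reviews describe it, would then combine these difference features with the main features.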
4a9doRh3Jv
Fast and Slow Generating: An Empirical Study on Large and Small Language Models Collaborative Decoding
main
Active
Large Language Models;Collaborative Decoding
foundation or frontier models, including LLMs
3;6;6;8
3;4;3;4
1;3;2;3
2;2;3;4
1;2;3;3
5.75
3.5
2.25
2.75
2.25
0.70014
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "How generalizable do you believe your findings are to other language tasks or domains?\nHow do you think the collaborative patterns might change, If different sampling technique is used." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Originality:- The paper introduces a novel FS-GEN framework.\nQuality:- The tables and figures are very well used.\nThe paper is written with a great clarity. \nsignificance:- The paper compares from smaller models to larger ones, based on the number of parameters." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper explores collaborative decoding strategies between large language models (LLMs) and small language models (SLMs). The authors introduce the FS-GEN framework, categorizing LLMs as System 2 (slow and deliberate) and SLMs as System 1 (fast and intuitive). The research focuses on decoding methods like speculative decoding, contrastive decoding, and proxy tuning to improve efficiency and mitigate issues like high inference time." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Could provide more discussion of practical applications.\nTrade-offs between the inference time and cost can be a great addition. \nThe experiments focused on only few tasks like:- MMLU-STEM, GSM8k, and MBPP, Having experiments over domain specific datasets can give a better understanding." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Related work is pushed to the Appendix. This is a strange choice. I understand there might have been a space crunch, but Related Work makes much more sense to be in the main paper." 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "Paper studies a relatively under explored but important and emerging area of research.\nThe findings are interesting, particularly the 2:8 law, collaborations being most necessary at the beginning of decoding and that high uncertainty tokens within System 1 are more likely to require collaboration.\nSome of the findings could spur targeted research in the field of collaborative decoding.\nExperimental benchmarks cover different capabilities like knowledge, math and coding, as well as two LLM families." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper studies collaborative decoding, where small language models and large language models work together in the decoding process. In particular, the paper offers a unifying perspective on 3 different collaborative decoding techniques: proxy tuning, speculative decoding and contrastive decoding. Authors categorize the larger model as System 2 and smaller model as system 1.\nThe paper studies the 3 techniques, their commonalities and differences through their framework FS-GEN (Fast and Slow Generating).\nThey find that only small fraction of decoding steps require collaboration and that System 1 and 2 follow a scaling law related to parameter ratios." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The System 1 and System 2 analogy is not well fleshed out, to the point where it feels more like a distraction from the main contributions.\n\nThe line fits on the param ratio scaling plot aren't very convincing.\n\nThe uncertainty analysis is only qualitative - quantitative metrics to support this hypothesis (covering different tasks and model families) are missing. Without them its hard to have confidence in this finding." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- It is not clear what exactly is being illustrated in Figures 11, 12, and 13. What are the different features? \n- How does one use the insights from this paper for contrastive decoding and proxy tuning?\n- Currently, greedy decoding is used to establish whether collaboration is required or not. I wonder if the next token perplexity could be another measure." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Proposes a new framework to analyze the collaborative behavior between models\n- Empirical results shed new light on this collaborative behavior. In particular, the scaling law for collaboration and frequent positions of collaboration are quite interesting." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper analyzes the patterns of collaboration between SLMs and LLMs when used in a collaborative decoding/training setup. By analyzing this behavior across multiple collaboration setups, tasks, and model families, the authors draw the following conclusions:\n- The collaboration frequency peaks at about 20%, with the maximum collaboration happening when there's the biggest gap in the model sizes. In fact, there's an inverse scaling law connecting the model size ratio and the collaboration frequency (more clearly evident for Qwen models than Pythia). \n- Most of the LLMs/System 2 interventions are required at the start of the decoding and for tokens for which SLMs are uncertain." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper analyzes speculative decoding, contrastive decoding, and proxy tuning. Except for speculative decoding, it's not clear if the analysis provides any executable insights for the other two setups. \nDrawing questionable analogies with human cognitive processes just because one model runs fast and the other slow and then commenting about how the collaborative distributions are different (L127-L129) is extremely flawed reasoning. The analogy doesn't make sense, except for the fact that one model is faster and the other is slower. \n\nComments about writing:\n- Why is O_g being used and not O_f for p_f (fused logits) in Section 2.2\n- L053: \"allow\" -> \"allows\"\n- L195: \"produce\" -> \"produced\"" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "Could you elaborate on the motivation of using system 1 - system 2 reasoning for collaborative decoding with SLMs and LLMs, specifically?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The paper asks an interesting question and presents several findings. The idea to take inspiration from system 1 and system 2 is interesting." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents an investigation into collaborative decoding between small and large language models, attempting to formalize it from the perspective of a system 1 / system 2 collaboration, where system 1 operates quicly and intuitively, while system 2 functions in a more slow and deliberate manner. The paper focuses on the differences between system 1 and 2 in the context of decoding, when system 1 would underperform compared to system 2 and how efficiency of the compound system can be improved. For their investigation, the authors use the Qwen and Pythia series. To evaluate the system, they consider MMLU-STEM, GSM8k and MBPP. 
The analysis focuses on two aspects of collaboration: frequency and position, where the former refers to how often the models should interact, whereas the latter refers to the specific points of interaction. They find that collaborative interactions are most critical at the beginning of the generation, and that the optimal frequency is around 80-20, depending on the task." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "My main qualm with the work is the presentation of the paper, which almost reads like a slide deck: plenty of conclusions and graphics, but little to no detail about how the experiments are actually set up or how the conclusions are drawn. I also don't see any evidence of how well the collaborative decoding actually works (that is, there are no accuracy scores reported), and how that may depend on the frequency or place of collaboration. The many figures are hardly described. There is also no discussion of how the results differ between the benchmarks and whether that may make sense given the topics.\n\nLastly, while I like the idea of interpreting collaborative decoding as a system-1 system-2 scenario, the current work does not really convince me that it makes sense to explore collaborative decoding with SLMs and LLMs in this way. Wouldn't LLMs be better both at the intuition and the deliberate reasoning?\n\nIn sum, it could be that the paper contains many interesting results, but if so, the current presentation does not do them justice.\n\nNB: the related work section is in the appendix and is not even referred to." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024fast,\ntitle={Fast and Slow Generating: An Empirical Study on Large and Small Language Models Collaborative Decoding},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4a9doRh3Jv},\nnote={under review}\n}" }, "abstract": { "value": "Large Language Models (LLMs) exhibit impressive capabilities across various applications but encounter substantial challenges such as high inference latency, considerable training costs, and the generation of hallucinations. Collaborative decoding between large and small language models (SLMs) presents a promising strategy to mitigate these issues through methods including speculative decoding, contrastive decoding, and emulator or proxy fine-tuning. However, the specifics of such collaborations, particularly from a unified perspective, remain largely unexplored. Inspired by dual-process cognitive theory, we propose a unified framework in this paper, termed Fast and Slow Generating (FS-GEN). Within this framework, LLMs (sometimes along with SLMs) are categorized as System 2 (slow and deliberate), while independent SLMs are designated as System 1 (fast and intuitive). We provide a comprehensive analysis of these collaborative methodologies, elucidating their common properties and shedding light on the differential knowledge capabilities of System 2 versus System 1 through the FS-GEN framework. Our findings indicate that only a small proportion of collaborative interactions (approximately less than 20\% in most instances) are necessary across various methods. These interactions between System 1 and System 2 conform to a scaling law related to the parameter ratios, enabling predictable collaboration. 
Furthermore, we explore the specific conditions under which collaboration proves most effective, particularly from an uncertainty perspective, offering novel insights that may guide future optimization efforts. Our research underscores that the fundamental distinction between System 1 and System 2 lies in the uncertainty of next token predictions, where interventions by System 2 are crucial to support System 1." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Large Language Models", "Collaborative Decoding" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/9d9f7db0df6ccb67e86a7d507210f6c6a4261551.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Fast and Slow Generating: An Empirical Study on Large and Small Language Models Collaborative Decoding" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
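To make the collaboration pattern described in the FS-GEN record above concrete, the following is a generic, uncertainty-gated sketch of SLM/LLM collaborative decoding: the small model (System 1) proposes every token and the large model (System 2) only steps in when System 1 looks uncertain. The entropy gate, the threshold, greedy selection, and the assumption of HuggingFace-style causal LMs with batch size 1 are illustrative choices, not the paper's exact procedure.

```python
# Illustrative sketch only: System 1 (small model) decodes, System 2 (large model)
# intervenes on high-uncertainty steps.
import torch

@torch.no_grad()
def collaborative_decode(slm, llm, input_ids, max_new_tokens=64, entropy_threshold=2.0):
    ids = input_ids                                   # (1, prompt_len) token ids
    n_interventions = 0
    for _ in range(max_new_tokens):
        slm_logits = slm(ids).logits[:, -1, :]        # System 1: fast proposal
        probs = torch.softmax(slm_logits, dim=-1)
        entropy = -(probs * probs.clamp_min(1e-9).log()).sum(dim=-1)
        if entropy.item() > entropy_threshold:        # System 1 is uncertain here
            llm_logits = llm(ids).logits[:, -1, :]    # System 2 intervenes
            next_id = llm_logits.argmax(dim=-1, keepdim=True)
            n_interventions += 1
        else:
            next_id = slm_logits.argmax(dim=-1, keepdim=True)
        ids = torch.cat([ids, next_id], dim=-1)
    return ids, n_interventions / max_new_tokens      # approximate collaboration frequency
```

Tracking `n_interventions / max_new_tokens` is one simple way to measure the kind of collaboration frequency discussed in the reviews (the roughly 20%, or 2:8, figure).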
4aWzNhmq4K
Choose Your Anchor Wisely: Effective Unlearning Diffusion Models via Concept Reconditioning
main
Active
Machine Unlearning;Diffusion Models.
alignment, fairness, safety, privacy, and societal considerations
3;3;6
4;5;4
2;2;3
2;1;3
2;3;3
4
4.333333
2.333333
2
2.666667
-0.5
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "Yes, Legal compliance (e.g., GDPR, copyright, terms of use)" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "The explanation of the key differences from other methods, along with the experimental results, solve most of my questions. I have no further questions aside from those mentioned before." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The experimental setup is well-structured, effectively addressing the majority of my inquiries regarding this method. \n2. The performance outcomes appear to be satisfactory. \n3. The writing is commendable; the method is articulated clearly, and its key distinctions from other approaches are clearly stated." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a novel method, termed CORE, designed for the unlearning of diffusion models by selectively eliminating undesirable knowledge. The proposed approach includes an innovative strategy for anchor selection and a newly formulated retain loss. Experimental results demonstrate the method's superior performance compared to existing techniques." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The visual results presented are insufficient. I am particularly interested in scenarios where the forget concepts and the retain concepts contain the same object but differ in adjectives. For instance, in Figure 3, \"Dadaism *Cat*\" is expected to be forgotten, while \"Vibrant Flow *Cat*\" should be retained. Could you provide additional visual results for this kind of situation?\n2. Ablation study. Without the retain loss, how much worse will the model be?\n3. In line 230, the statement \"In the unlearning objective, p_a acts as an anchor concept to recondition images from the forget set onto\" appears incomplete. It seems that there is a missing component following the word \"onto.\"" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to the weakness part." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. 
The paper is well written.\n2. Machine unlearning is an interesting topic, and studying how to unlearn some concepts in SD models is important." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work proposes Concept REconditioning (CORE), a simple yet effective approach for unlearning harmful, sensitive, or copyrighted content from diffusion models. The key contribution lies in the selection of anchor concepts and the retain loss. Extensive experiments demonstrate that CORE surpasses state-of-the-art methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The proposed method appears rather trivial. The author simply presents a pairing method of anchor and forget concepts (either from the retain set or other forget concepts) within the unlearning objective of Concept Ablation (CA) [1]. This is highly engineering-focused and lacks adequate innovation. The proposed retaining loss only transitions from predicting Gaussian noise to aligning with the prediction of the pretrained model. Although experimentally proven effective by the author as indicated in Table 3, the author does not discuss this aspect in sufficient depth, and it is regarded as a relatively minor improvement.\n\n2. There is a deficiency in the comparison with some state-of-the-art methods in the experiments [2, 3, 4].\n\n3. The experiments lack comparisons with more models, for example, SD v1.4, which is commonly employed by previous methods, and larger models like SD-XL. Additionally, there is a lack of results validating the retaining effect on large-scale datasets, such as COCO-30K.\n\n4. The visualization results do not utilize the commonly used prompts adopted by previous works [1][2], making it difficult to demonstrate advantages over previous efforts. Moreover, the retained concepts also exhibit changes in the image content, as seen in Figure 2.\n\nReferences:\n[1] Ablating Concepts in Text-to-Image Diffusion Models\n[2] One-Dimensional Adapter to Rule Them All: Concepts, Diffusion Models and Erasing Applications\n[3] To Generate or Not? Safety-Driven Unlearned Diffusion Models Are Still Easy to Generate Unsafe Images... For Now\n[4] Unified Concept Editing in Diffusion Models" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. How does the CORE method perform on other content, such as specific entities or specific concepts?\n2. Why can't CORE be directly applied to SD1.5, and why does it instead require fine-tuning on UnlearnCanvas? From my personal experience, fine-tuning SD1.5 leads to significant changes in its performance, and unlearning on a fine-tuned model makes it relatively easier to control its performance on other non-unlearning concepts. However, this shouldn't reflect the actual scenario."
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. This paper produces COncept REconditioning (CORE), a new efficient and effective unlearning method on diffusion models.\n2. Extensive tests on UnlearnCanvas demonstrate that CORE surpasses existing baselines, achieving near-perfect scores and setting new state-of-the-art performance for unlearning diffusion models. CORE also exhibits strong generalization in unlearning styles.\n3. The ablation studies in paper show that the benefits of using a fixed, non-trainable target noise over other unlearning methods." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces concept reconditioning (CORE), a simple yet effective approach for unlearning diffusion models. By guiding the noise predictor conditioned on forget concepts towards an anchor generated from alternative concepts, CORE surpasses state-of-the-art methods including its close variants and achieves nearperfect performance, especially when CORE aim to forget multiple concepts. The difference between CORE with other existing approaches is the choice of anchor and retain loss." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The entire paper feels quite redundant. The related work section and Chapter 2 cover the same material. The content after line 294 in Section 3.2 seems to repeat what was mentioned earlier.\n2. The paper mentions various unlearning concepts, such as privacy and explicit content, but in practice, it only focuses on style. The paper claims generalization as one of its contributions, so how is this demonstrated? Or is CORE only applicable to style unlearning?\n3. The paper compares many unlearning methods, but there is only one figure (Figure 2) showing the actual results, and each concept has just one result. The presentation of the outcomes is too sparse. Although the tables show some differences between the models, I still think some of the redundant content could be reduced to include more actual results.\n4. In addition to the fact that the methods for removing concepts mentioned in the paper are not comprehensive, there are also methods described in references [1] and [2].\n【1】.Ni Z, Wei L, Li J, et al. Degeneration-tuning: Using scrambled grid shield unwanted concepts from stable diffusion[C]//Proceedings of the 31st ACM International Conference on Multimedia. 2023: 8900-8909.\n【2】.Patrick Schramowski, Manuel Brack, Björn Deiseroth, and Kristian Kersting. 2023. Safe latent diffusion: Mitigating inappropriate degeneration in diffusion models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 22522–22531." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We introduce COncept REconditioning (CORE), a simple yet effective approach for unlearning in diffusion models." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024choose,\ntitle={Choose Your Anchor Wisely: Effective Unlearning Diffusion Models via Concept Reconditioning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4aWzNhmq4K},\nnote={under review}\n}" }, "abstract": { "value": "Large-scale conditional diffusion models (DMs) have demonstrated exceptional ability in generating high-quality images from textual descriptions, gaining widespread use across various domains. However, these models also carry the risk of producing harmful, sensitive, or copyrighted content, creating a pressing need to remove such information from their generation capabilities. While retraining from scratch is prohibitively expensive, machine unlearning provides a more efficient solution by selectively removing undesirable knowledge while preserving utility. In this paper, we introduce \\textbf{COncept REconditioning (CORE)}, a simple yet effective approach for unlearning diffusion models. Similar to some existing approaches, CORE guides the noise predictor conditioned on forget concepts towards an anchor generated from alternative concepts. However, CORE introduces key differences in the choice of anchor and retain loss, which contribute to its enhanced performance. We evaluate the unlearning effectiveness and retainability of CORE on UnlearnCanvas. Extensive experiments demonstrate that CORE surpasses state-of-the-art methods including its close variants and achieves near-perfect performance, especially when we aim to forget multiple concepts. More ablation studies show that CORE's careful selection of the anchor and retain loss is critical to its superior performance." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Machine Unlearning", "Diffusion Models." ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/f6c0ac1d310a590cc66d34cbc1576b8cc7ff1d11.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": null, "title": { "value": "Choose Your Anchor Wisely: Effective Unlearning Diffusion Models via Concept Reconditioning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4anfpHj0wf
Unlocking Point Processes through Point Set Diffusion
main
Active
Generative Model;Diffusion Model;Set Model;Point Sets;Forecasting;Density Estimation;Spatial;Temporal;Probabilistic Models
generative models
5;6;6;8
5;3;4;3
3;3;3;3
2;3;3;3
4;3;3;4
6.25
3.75
3
2.75
3.5
-0.760886
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. In Figure 5, can the authors show the predicted density of different trajectories in the same masked area?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The idea of using the diffusion-style model to characterize point processes is super interesting. The content is clear and well-written, making the methodology and the results accessible to the reader. The paper also covers unconditional and conditional sampling methods, which have the potential to correspond to two important questions in the point process modeling (first-order and second-order modeling). The authors also provide thorough experimentation to validate the effectiveness of the proposed model." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a novel modeling approach to point processes via diffusion on point sets (discrete events), addressing the reliance on the intensity function when establishing or learning the model. It can capture the distribution of point processes and generate a series of events based on noise point sets. Meanwhile, the sampling efficiency of point set diffusion is superior. The overall presentations of both the methodology and experiments are excellent." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "In my opinion, the main weakness, or the most improvement-needed part of the paper, lies in the modeling and experiments of ordered point processes:\n\n1. An important characteristic of the ordered point processes (TPPs or STPPs) is the dependence between future events and past events, which is not considered in the model. The proposed method seems to only consider the first-order statistics of the data (the event intensity/density), and treat these statistics at certain times or locations as fixed values to be learned by the model. For example, if the training data set contains multiple event trajectories sampled over the horizon of $[0, T]$, then the model will assume $p(T/2)$ (the event density at $T/2$) is fixed and to be learned. However, in TPPs, the $p(T/2)$ depends on the history (observation before $T/2$), and is different in each realization of the event trajectory, which violates the assumption of the diffusion model.\n\n2. Although a conditional sampling method is proposed in the paper (Algorithm 3.1), I am wondering about its effectiveness in practice. First, what the $q(X_{t-1}|X_{0}^{c})$ is (line 287) remains unknown. Meanwhile, the (technical/practical) reason for using this conditioning is not shown. The results in Figure 7 are not convincing enough. 
To me, even though the authors claim that they are solving conditioning tasks and are visualizing the predicted densities for events from different trajectories (panels at the bottom), these density plots would look similar if we overlapped them with each other. In other words, I think the model only predicts an averaged event intensity over space, and it has little connection with the conditioned samples.\n\n3. An alternative way to prove the effectiveness of conditional generation is to show the predicted intensity/density of events at different times, given a trajectory from the pinwheel dataset. This is the same idea as Figure 5 in [1]. The difference between density functions at different times is more significant and would help validate the conditional sampling method.\n\n4. The conditional sampling task is only experimented with in the spatial domain. An example showing the evolution of the predicted conditional density of a pinwheel trajectory can support the claim of effective conditional sampling in an ordered (temporal) domain.\n\n5. I am also concerned that there is no log-likelihood metric reported in the paper. The metrics used in the paper are about the first-order characteristics of the data, on which I believe the proposed Point Set Diffusion can perform well. However, they cannot fully reflect the model's fit to the data when second-order data dependencies are involved (e.g., in ordered point processes). On the other hand, the log-likelihood is still the gold standard for assessing the model's goodness-of-fit to the data when it comes to conditional models or tasks [2][3]. Other point process studies that use diffusion models also report the data log-likelihood when evaluating the model [4][5]. I am curious about the proposed model's performance on the log-likelihood metric.\n\nAgain, I acknowledge and respect the authors' contribution to the proposed method, and I hope the above questions can be properly answered or addressed.\n\n---\n[1] Chen, Ricky TQ, Brandon Amos, and Maximilian Nickel. \"Neural Spatio-Temporal Point Processes.\" International Conference on Learning Representations.\n\n[2] Daryl J Daley, David Vere-Jones, et al. An introduction to the theory of point processes: volume I: elementary theory and methods. Springer, 2003.\n\n[3] Reinhart, Alex. \"A review of self-exciting spatio-temporal point processes and their applications.\" Statistical Science 33.3 (2018): 299-318.\n\n[4] Dong, Zheng, Zekai Fan, and Shixiang Zhu. \"Conditional Generative Modeling for High-dimensional Marked Temporal Point Processes.\" arXiv preprint arXiv:2305.12569 (2023).\n\n[5] Yuan, Yuan, et al. \"Spatio-temporal diffusion point processes.\" Proceedings of the 29th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 2023." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. 
The sampling time and quality of the diffusion model are directly related to the number of forward/backward steps, which I could not find in the paper. Could the authors provide some ablation study on the number of steps, e.g., how the sampling time and quality grow with it?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper is overall well-written and easy to follow. The basic concepts are introduced clearly with consistent notations. The forward process, backward process, and final sampling algorithms are well explained. Illustrations (Figures 1-3) are very clear for readers to follow the workflow of the proposed Point Set Diffusion model. The datasets and metrics are also clear in the experiment section. \n\n2. The idea of leveraging diffusion models to generate the whole point process is intriguing, and it is quite different from the common approaches that use autoregressive models with parameterized intensity functions, which suffer from slow sampling and are restricted to forecasting tasks. Numerical results are very promising and support the efficacy of the proposed model." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a Point Set Diffusion model for conditioned and unconditioned generation of point processes (spatial, temporal, and spatio-temporal) without intensity functions. The model treats the latent space of the point process as a whole and applies diffusion to learn how to generate point processes from noise (unconditioned) and conditioning masks (conditioned). In the training phase, the point process is passed through a forward process that gradually thins the original points and adds points from a noise point process. Then, a parameterized model is trained for the backward process that gradually predicts the points in the last timestep conditioned on the current timestep and thins the noise points in the current point process. After training, both conditioned and unconditioned sampling procedures are provided. Numerical experiments illustrate that the proposed Point Set Diffusion model achieves much faster sampling speed than intensity-based autoregressive models. Moreover, it outperforms several baseline autoregressive models on various SPP and STPP tasks, especially density estimation tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Currently there are very few baseline algorithms, e.g., for SPP conditional generation there is only one baseline, and for STPP forecasting there are only two. It would be more convincing to compare with more baseline models, or to provide more evidence that the current baselines are already SOTA (which I believe they are)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "None." }, "flag_for_ethics_review": { "value": [ "No ethics review needed."
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. In the experiments, how are $\\alpha_t$, $\\beta_t$, and $T$ set?\n\n2. The proposed model generalizes the Add-Thin model to general metric spaces. When modeling univariate TPPs, how does the performance of the proposed model compare to Add-Thin in both unconditional and conditional sampling scenarios?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. This paper generalizes the Add-Thin model to define a model for point processes on general metric spaces, enhancing the model's applicability and promising future prospects.\n2. The idea is sound and well-founded. The paper is overall well-written and easy to follow.\n3. Experiments show that the proposed model achieves state-of-the-art results on both conditional and unconditional tasks while enabling faster sampling." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a diffusion-based latent variable model for general point processes on metric spaces. By learning stochastic interpolations between data and noise point sets, the model enables efficient, parallel sampling and flexible generation for complex tasks on the metric space. Experiments on synthetic and real-world datasets show that the proposed model achieves state-of-the-art results in unconditional and conditional tasks for spatial and spatio-temporal point processes." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. It would be helpful to discuss the connections between the proposed model and the Add-Thin model when modeling univariate temporal point processes.\n2. In the conditional sampling, the definition of $q(X_{t-1} | X_{0}^c)$ in line 287 was not provided.\n\nTypo: $X_{t+1}^{\\text{thin}}$ and $X_{t}^{\\text{thin}}$ in Eq.(9) should be $X_{t+1}^{\\varepsilon}$ and $X_{t}^{\\varepsilon}$." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Can you add a discussion on learning time? How does the computational complexity increase, especially with more data points?\n- How can hyperparameters (e.g. number of diffusion steps T or noise scheduling) be determined?\n- Please tell me more about the limitation of the proposed method. For example, how robust is the proposed method in situations where there is little data? Also, will the interpretability of the proposed method be lower than parametric methods (e.g., DNN-based Hawkes processes), or will the number of sensitive hyperparameters increase by using diffusion models as a base?" 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The approach of modeling point processes using the diffusion model is interesting.\n- Efficient sampling is achieved by making effective use of thinning.\n- The effectiveness of the proposed method is evaluated on artificial and real data.\n- The manuscript is well-written." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a novel diffusion-based approach to model point processes without relying on traditional intensity functions. This model is characterized by its ability to efficiently and flexibly generate point sets through stochastic interpolation between data and noise sets. Experiments on synthetic and real-world datasets demonstrate that the model achieves state-of-the-art performance in generating spatial and spatiotemporal point processes, significantly outperforming existing methods in terms of speed and accuracy of sample generation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- There is not enough discussion about computational complexity.\n- Not very clear on how to set hyperparameters.\n- No mention of the effectiveness of the method with respect to the amount of data.\n- No discussion of limitation." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024unlocking,\ntitle={Unlocking Point Processes through Point Set Diffusion},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4anfpHj0wf},\nnote={under review}\n}" }, "abstract": { "value": "Point processes model the distribution of random point sets in mathematical spaces, such as spatial and temporal domains, with applications in fields like seismology, neuroscience, and economics. Existing statistical and machine learning models for point processes are predominantly constrained by their reliance on the characteristic intensity function, introducing an inherent trade-off between efficiency and flexibility. In this paper, we introduce Point Set Diffusion, a diffusion-based latent variable model that can represent arbitrary point processes on general metric spaces without relying on the intensity function. By directly learning to stochastically interpolate between noise and data point sets, our approach enables efficient, parallel sampling and flexible generation for complex conditional tasks defined on the metric space. Experiments on synthetic and real-world datasets demonstrate that Point Set Diffusion achieves state-of-the-art performance in unconditional and conditional generation of spatial and spatiotemporal point processes while providing up to orders of magnitude faster sampling than autoregressive baselines." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Generative Model", "Diffusion Model", "Set Model", "Point Sets", "Forecasting", "Density Estimation", "Spatial", "Temporal", "Probabilistic Models" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/4f65b03d7e4ffbc6607c1fbe4520ee17564b6a43.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Unlocking Point Processes through Point Set Diffusion" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4b1cJHn7q5
Enforcing 3D Topological Constraints in Composite Objects via Implicit Functions
main
Active
Topology; 3D Reconstruction; Implicit functions; Composite Objects
applications to computer vision, audio, language, and other modalities
3;3;3;5
5;5;5;4
2;2;3;3
1;2;3;3
2;4;3;4
3.5
4.75
2.5
2.25
3.25
-1
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "- Please expand and compare the used losses with previous losses used for topologically aware segmentation in the medical imaging (and potentially other) literature. Detail why and how the specific losses proposed here are unique, and particularly effective for the task at hand. These claims should be backed up experimentally.\n- The authors should comment on the potential bias such a prior induces upon outputs in pathological cases. Ideally, this would also be backed up experimentally. Could this prior be determined on a patient-specific basis, or based on other factors besides a specific pre-defined overlap?\n- Please clarify the refinement of the nn-Unet segmentations using the latent SDF representation, as this part is not detailed clearly in the paper." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- S1: The work is interesting and approaches a worthwhile topic in the subdomain of medical image analysis. The authors aptly note that existing multi-organ segmentation methods do not consider topological constraints between different sets of organs. While the surface can be reconstructed to avoid local artefacts, constraints between organs with specific priors cannot be easily specified. Based on this, they propose several loss functions based on surface-aware Monte Carlo sampling that propose a correct behaviour (contact, non-contact, non-intersection) between two shape pairs.\n- S2: The presentation is compelling and professional; there are no major typos, and the figures are nicely constructed.\n- S3: The method works well with respect to the baseline nn-Unet, and is particularly impressive with respect to out-of-distribution data. The authors also show that using deep SDFs for this task generates superior overlap estimates than converting the outputs to meshes." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose a method for 3D organ reconstruction with regard to pre-defined topological constraints. The core of the proposed approach is a global Monte Carlo sampling that evaluates the relationship between signed distances for two organs to estimate their relationship. In contrast, previous works only consider local constraints, e.g. non-intersection of different parts, but cannot evaluate the global contract ratio of two sub-organs. The authors evaluate their method on both multi-organ cardiac and spine datasets, but emphasize applicability to other organs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- W1. 
From my understanding, the two major contributions are the way of regularizing the multi-organ reconstruction approach loss functions constructed through surface aware Monte Carlo sampling, and using deep SDF as a representation for this task. Numerous loss functions for regularizing with respect to surface contact have been explored over the years [1,2,3]. Some others were designed for 2D but obey the same principles of (lack of) intersection and contact as explored here.\nMy main concern is that the method evaluation is limited to nn-Unet and fitting the SDFs to each organ individually. There are other losses that have been used to regularize topological consistency of medical organs; the authors should compare to these, as is I don’t think the experimental aspects of this paper do justice to the previous literature on this topic.\nWhile the authors mention the closely related method by Gupta et al [1], they discard it in the introduction as it only handles local constraints, and cannot be used to enforce global organ contact priors. However, specifically with respect to my later point (see W2), such an approach may bias segmentations less. \n- W2. Medical relevance is not explored despite being the primary motivation for this work. Enforcing a certain pre-specified level of contact between organ pairs is certainly useful for healthy patients, but in pathological cases one might specifically seek to find violations or deviations from such an overlap. The authors should at the very least mention how these losses might bias predictions towards reconstructions that mimic healthy organs. The paper would be much stronger and application relevant if this were explored.\n- W3. In the introduction the authors state (L98-99) that the latent vector of the 3D SDF is used to refine the segmentation outputs of the nnUnet. However, despite showing experiments how this approach is superior, they never actually detail how this is achieved.\n\n[1] Gupta, S., Hu, X., Kaan, J., Jin, M., Mpoy, M., Chung, K., ... & Chen, C. (2022, October). Learning topological interactions for multi-class medical image segmentation. In *ECCV.*\n[2] Ganaye, P. A., Sdika, M., Triggs, B., & Benoit-Cattin, H. (2019). Removing segmentation inconsistencies with semi-supervised non-adjacency constraint. *Medical image analysis*, *58*, 101551.\n[3] Reddy, Charan, Karthik Gopinath, and Herve Lombaert. \"Brain tumor segmentation using topological loss in convolutional networks.\" (2019)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please refer to the weakness part.\nAdditionally, the abdomen dataset should be a perfect fit for this work as abdomen region contains multiple organs and they are close to each other." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. 
This work targets the exploration of shape constraints for reconstructing organs from human scans. \n2. In this paper, the authors propose two shape constraints: one is the contact ratio and the other is the minimum distance. Several straightforward losses are introduced to maintain the desired contact ratio and distance through optimization. \n3. The writing is clear and easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work focuses on resolving shape-contact issues using optimization as a post-processing step. Two constraints are proposed to regularize the shape representation: the contact ratio and the minimum distance between two shapes. By maintaining the desired contact ratio and the distance between shapes, the reconstructed 3D shapes are more precise and exhibit fewer penetration artifacts." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. By utilizing segmentations from existing models and fitting DeepSDF to them, the proposed method is specifically designed for shape post-processing with knowledge from the previous steps. \n2. The P_contact and P_non-contact are derived from the overfitted DeepSDF representation. However, if the DeepSDF representation is not correct or the segmentation is not accurate enough, the P_contact point set is not correct. The optimization therefore cannot correct the initial prediction, and the optimized result will contain artifacts. Please discuss how the method handles cases where the initial DeepSDF representation or segmentation is inaccurate. An analysis of the method's robustness to errors in the initial inputs would be beneficial. \n3. In the introduced loss functions, the optimization is applied only to the 3D shape representation; however, the optimized 3D shape might not be consistent with the image after optimization. Combined with the previous point, if the initial representation or segmentation is inaccurate, it is unclear how the optimization could correct these errors. Please include a discussion on potential methods to maintain this consistency or evaluate it quantitatively." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. Have you integrated this method into regularized versions of the deep signed distance function (see the Weaknesses section)? Can you please report on the results? Here, the integration of a Lipschitz regularization would be one possible option.\n2. Can you provide a justification for the inclusion of these four particular loss functions beyond the ablation study? Are there particular theoretical frameworks or principles that should be applied to justify the loss function design?\n3. What is the additional runtime caused by the sampling?\n4. I am lacking details on the optimization in Section 3.2.3. Can you please provide them?\n5. In Figure 8, I can hardly recognize the distribution of the topologically meaningful points near the interfaces.
Can you please present this in a better way?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The presentation of the paper is excellent, and all methods are clearly described. To the best of my knowledge, I have not seen the combination of the four loss functions in this way (although, I have encountered most (probably all) as separate loss functions elsewhere).\nMoreover, the research question itself (imposing topological constraints for DeepSDF) is highly significant.\nFinally, the numerical experiments are systematically conducted and (partially) underline the claims of the paper." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a concept to incorporate topological constraints for cardiac shape representation in the context of deep signed distance functions. The method is composed of several parts: sampling of topologically meaningful points, optimization of the sum of four loss functions, and the enforcement of minimum distance constraints. In several numerical experiments, the performance of the method is both qualitatively and quantitatively examined. In particular, an ablation study reveals that all four loss functions are essential to achieve the reported performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The reconstruction method itself builds upon rather old publications by Park and Isensee, thereby completely ignoring the regularized deepSDF approaches with their substantially improved reconstruction quality (e.g., \"Reconstruction and completion of high-resolution 3D cardiac shapes using anisotropic CMRI segmentations and continuous implicit neural representations.\" by Sander et al., \"Sdf4chd: Generative modeling of cardiac anatomies with congenital heart defects.\" by Kong et al. or “Shape of my heart: Cardiac models through learned signed distance functions” by Verhülsdonk et al.). In particular, these regularized versions are proven to preserve topological constraints better. A systematic benchmark with some of these recent approaches is required instead of only considering \"old\" approaches.\nMoreover, the design of the four loss functions is entirely heuristic, any motivation or mathematical reasoning for this particular choice is completely lacking (only the ablation study partially underlines this specific choice).\nFinally, I suspect that the sampling requirements (300k points after 10 iterations) result in inferior run time (and maybe performance) compared to the above-mentioned regularized approaches." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "_see **Weaknesses** for key questions/remarks._" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "_(somewhat ordered from most to least important)_\n\n## S1. Clear Illustrations\n- The authors provide well-designed illustrations to convey their intuition/contributions (Fig. 2), as well as to share their qualitative results (e.g., by highlighting contact vs. interpenetration regions in Fig. 3).\n\n## S2. Motivation & Relevance\n- The implicit modeling of multi-component scenes is an under-explored topic. Most of the research in that direction focuses on human/object interaction scenarios, but the resulting solutions do not always transfer well to anatomical use-cases (e.g., due to rigidity assumptions).\n- Moreover, the authors' idea to condition the contact/distance losses based on medical prior is interesting and well-motivated.\n\n## S3. Decent Reproducibility\n- Even though the authors did not release their code, an expert in the art should be able to re-implement their work, i.e., extending the publicly-available DeepSDF implementation with the proposed multi-object losses." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "## Motivation\n- Deep implicit functions have emerged as a powerful solution for representing 3D shapes. \n- However, most of the focus has been put on single-object scenarios, ignoring topological constraints (contact enforcement, non-interpenetration, etc.) that may arise in multi-object applications, such as anatomical modeling.\n\n## Contributions\n- The authors extends existing neural SDF solutions [Park et al., 2019] to enforce non-interpenetration between different object categories (i.e., different anatomical entities), as well as to enforce user-defined surface contact ratio or surface distance. \n- This is achieved through the introduction of attraction-repulsion losses applied to a subset of 3D points meeting the contact constraints.\n\n## Results\n- The authors demonstrate their solution on two clinical use-cases: 3D whole-heart reconstruction (enforcing user-defined surface contact ratio between hear components) and lumbar spine reconstruction (enforcing user-defined minimal distance between vertabrae).\n- They compare to the original segmentation results (nn-Unet [Isensee et al., 2018]) as well as baseline DeepSDF [Park et al., 2019a], showing that their method succeeds in enforcing non-interpenetration and the user-defined constraints.\n- An ablation study, as well as well-presented qualitative results, are also provided." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "_(somewhat ordered from most to least important)_\n\n## W1. Lack of Relevant SOTA Comparison\n\n### W1.a. No Mention of Existing Multi-Organ DIF Works\n\n[L154-157] The authors claim that:\n\n> We focus on two different kinds of constraints—**neither of which has been\nconsidered in previous work**—in two distinct scenarios. 
First, when reconstructing the four chambers\nof the human heart, these chambers **should never intersect but instead should be in contact with\neach other** over a given percentage of their surface areas. [...]\n\nHowever, their novelty claim is heavily questionable. Even when focusing only on the narrow domain of implicit anatomical modeling, at least two papers [a, b] have already proposed contact and/or non-interpenetration losses. Similar losses have been proposed for other applications, e.g., human/object interaction modeling [c]. The fact that the authors neither compare to—nor even discuss—such prior art is problematic.\n\n### W1.b. Comparison to Baseline Only\nSimilarly, the authors only compare their method to a single other deep implicit function method, DeepSDF [Park et al., 2019]. This work is quite outdated and focuses on single-object scenarios. It is obvious that it would under-performed the proposed solution w.r.t. contact/interpenetration metrics. It would have been meaningful to compare the proposed method to (a) more recent implicit solutions targeting multi-object scenarios [a,b,c] ; or at least to (b) DeepSDF applied to modeling the entire scene (as one single multi-part object) rather than to multiple DeepSDF instances applied to each component.\n\n## W2. Superficial Contributions Compared to SOTA\n\n### W2.a. Attraction-Repulsion Losses Already Applied to Anatomical Modeling\n\nWith the above-mentioned prior art in mind, the contributions claimed in this paper appear rather shallow. Their only claims are the losses ensuring non-interpenetration, as well as enforcing surface contact or surface distance (depending on the scenario). While the idea to condition the contact/distance losses on user-defined values is novel, similar contact/repulsion functions already exist in the literature [a,b,c]. Due to the lack of comparison, it is also unclear how their formulation of the contact/inter-penetration losses fair compared to existing solutions.\n\n### W2.b Redundant Definition (?)\n\nThe self-intersection loss $\\mathcal{L}\\_{\\text{intersecting}}$ and contact-ratio loss $\\mathcal{L}\\_{\\text{contact}}$ proposed in this paper appears somewhat redundant, as well as highly similar to the loss $\\mathcal{L}^\\mathcal{C}$ proposed in [b], where it is defined as an \"attraction-repulsion\" function to ensure both non-interpenetration and contact of surfaces.\n\nSimilar to the current submission, the loss in [b] relies on the sampling of contact points (set $\\mathcal{C}$ in [b]), generalized to any number of surfaces (not just 2). 
The only contribution of the present paper is the weighting of the set size by the target user-provided contact ratio (a minor change, in my opinion).\n\nIndeed, if we define:\n\n$\\mathcal{A}\\_{\\text{contact}} = \\mathcal{A}\\_{\\text{intersecting}} \\cup \\mathcal{A}\\_{\\text{outside}} \\cup \\mathcal{A}\\_{\\text{single}}$, \n\nwith $\\mathcal{A}\\_{\\text{outside}}$ set of close points outside all objects and $\\mathcal{A}\\_{\\text{single}}$ set of points inside a single object, then:\n\n$\\mathcal{L}\\_{\\text{contact}} = \\sum\\_{x \\in \\mathcal{A}\\_{\\text{contact}}} |\\sum\\_{i \\in [a, b]} f(i, x)| $\n$ = \\sum\\_{x \\in \\mathcal{A}\\_{\\text{intersecting}}} |\\sum\\_{i \\in [a, b]} f(i, x)| + \\sum\\_{x \\in \\mathcal{A}\\_{\\text{outside}}} |\\sum\\_{i \\in [a, b]} f(i, x)| + \\sum\\_{x \\in \\mathcal{A}\\_{\\text{single}}} |\\sum\\_{i \\in [a, b]} f(i, x)|$\n$ = \\sum\\_{x \\in \\mathcal{A}\\_{\\text{intersecting}}} \\sum\\_{i \\in [a, b]} |f(i, x)| + \\sum\\_{x \\in \\mathcal{A}\\_{\\text{outside}}} \\sum\\_{i \\in [a, b]} |f(i, x)| + \\sum\\_{x \\in \\mathcal{A}\\_{\\text{single}}} |\\sum\\_{i \\in [a, b]} f(i, x)|$\n$ = \\mathcal{L}\\_{\\text{intersecting}} + \\sum\\_{x \\in \\mathcal{A}\\_{\\text{outside}}} \\sum\\_{i \\in [a, b]} |f(i, x)| + \\sum\\_{x \\in \\mathcal{A}\\_{\\text{single}}} |\\sum\\_{i \\in [a, b]} f(i, x)|$,\n\nc.f. $| x + y | = | x | + | y |$ if $\\text{sign}(x) = \\text{sign}(y)$\n\nHence $ \\mathcal{L}\\_{\\text{intersecting}}$ being redundant to $\\mathcal{L}\\_{\\text{contact}}$.\n\nMoreover, based on the above equation, we can also observe that:\n\n$\\mathcal{L}\\_{\\text{contact}} \\approx \\mathcal{L}^\\mathcal{C} + \\Delta\\mathcal{L}$,\n\nwith the main difference (if we ignore the sigmoid-based normalization added to the loss $\\mathcal{L}^\\mathcal{C}$ in [b]) being:\n\n$\\Delta\\mathcal{L} = \\sum\\_{x \\in \\mathcal{A}\\_{\\text{single}}} |\\sum\\_{i \\in [a, b]} f(i, x)| - \\sum\\_{x \\in \\mathcal{A}\\_{\\text{single}}} \\sum\\_{i \\in [a, b]} |f(i, x)|$. \n\nI.e., for points close to 2 objects but inside only one, the authors of [b] compute the sum of absolute SDF values, whereas the present authors compute the absolute sum of SDF values. I do not have the insight to know which is best (a comparison could be interesting), but I believe that the difference in terms of overall supervision is minor (since it concerns only a small subset of points, and since other losses such as $\\mathcal{L}\\_{\\text{data}}$ would have a more significant influence on those).\n\n## W3. Medical Grounding & Clinical Applicability\n- A key claim in this work is the enforcement of topological priors from the medical literature. However, the medical grounding is somewhat lacking. E.g., it is unclear where the authors got the 27\\% value used as surface contact ratio for left ventricle and left myocardium. Only one reference is provided w.r.t. heart anatomy [Buckberg et al., 2018], but the above number does not seem to actually appear in that referenced article (?). \n- One can also wonder what would be the actual clinical use for a method that forces the reconstruction to meet statistical constraints based on healthy populations. E.g., what happens for patient with a heart or spine condition? The authors do warn that \"_in this paper, we restrict ourselves to healthy subjects for whom this constraint must be satisfied._\" [L177-178] But they do not provide any insight on the clinical impact of this limitation.\n\n## W4. 
Minor - Methodology Not Always Clear\n- The contributions w.r.t. enforcing the contact ratio and w.r.t. enforcing the minimum distance appear severely disconnected (both in terms of methodology and in terms of actual application). The formalism of the corresponding losses could be better homogenized, e.g., by highlighting how the two losses constrain the range of valid distances (the contact loss enforce a maximum distance ; the distance loss enforce a minimum one).\n- The redundant definition of the point sets ($\\mathcal{A}\\_{\\text{contact}}, \\mathcal{A}\\_{\\text{non-contact}}, \\mathcal{A}\\_{\\text{intersecting}}$) is a bit confusing. I.e., is it useful to list these sets in [L221-223] if they are formally defined afterwards, [L255-258]?\n- The font style of the loss functions is not always consistent (.e.g, $\\mathcal{L}\\_{\\text{contact}}$ vs. $\\mathcal{L}\\_{contact}$).\n\n#### **Additional References:**\n\n[a] Zhang, Congyi, et al. \"An Implicit Parametric Morphable Dental Model.\" ACM Transactions on Graphics (TOG) 41.6 (2022): 1-13.\n\n[b] Liu, Yuchun, et al. \"Implicit Modeling of Non-rigid Objects with Cross-Category Signals.\" Proceedings of the AAAI Conference on Artificial Intelligence. Vol. 38. No. 4. 2024.\n\n[c] Hassan, Mohamed, et al. \"Synthesizing physical character-scene interactions.\" ACM SIGGRAPH 2023 Conference Proceedings. 2023." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We reconstruct 3D composite objects such as human hearts or lumbar spines and enforce topological constraints between each parts." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024enforcing,\ntitle={Enforcing 3D Topological Constraints in Composite Objects via Implicit Functions},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4b1cJHn7q5},\nnote={under review}\n}" }, "abstract": { "value": "Medical applications often require accurate 3D representations of complex organs with multiple parts, such as the heart and spine. Their individual parts must adhere to specific topological constraints to ensure proper functionality. Yet, there are very few mechanisms in the deep learning literature to achieve this goal.\n\nThis paper introduces a novel approach to enforce topological constraints in 3D object reconstruction using deep implicit signed distance functions. Our method focuses on heart and spine reconstruction but is generalizable to other applications. We propose a sampling-based technique that effectively checks and enforces topological constraints between 3D shapes by evaluating signed distances at randomly sampled points throughout the volume. We demonstrate it by refining 3D segmentations obtained from the nn-UNet architecture." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Topology; 3D Reconstruction; Implicit functions; Composite Objects" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/3c8f8d16cf06ca7dce4914dbab7c3ac1cf12cf69.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Enforcing 3D Topological Constraints in Composite Objects via Implicit Functions" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4bOCP1GtX4
WenXinGPT: A Multimodal Conversational Model for Enhancing Orthopedic Expert Consultations
main
Active
Multimodal conversational model;orthopedic expert consultations;medical visual language model;zero-shot scenarios;large language models
foundation or frontier models, including LLMs
3;3;3
5;4;4
2;2;1
2;3;1
1;2;2
3
4.333333
1.666667
2
1.666667
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Can you provide results on other medical datasets to demonstrate generalizability?\n\n2. Why were modern multimodal models not included as baselines? The current comparison against GPT-3.5 seems inappropriate for evaluating multimodal capabilities.\n\n3. The paper mentions using \"16 A100 GPUs (32GB)\" for training, but A100s only come in 40GB and 80GB variants. Could you what models were used?\n\n4. What specific advantages does your architecture provide over existing models like Llama 3 that already use GQA?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The authors create a comprehensive dataset covering 16 distinct categories of orthopedic surgery-related data from a medical institution. The dataset includes diverse medical information could be valuable for future research." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces WenXinGPT, a 7B parameter multimodal language model designed for orthopedic medical consultations in Chinese healthcare settings. The authors present a three-stage training process involving pretraining, domain-specific fine-tuning, and incorporation of multi-disciplinary expert consultations. The model is evaluated on medical data and compared against GPT-3.5 and XrayGPT using ROUGE." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper claims to be multimodal, stating \"WenXinGPT, a multimodal large model specifically designed for Chinese medical image diagnosis\" (Introduction), yet provides no technical details about visual processing or multimodal integration architecture.\n\nThe evaluation is fundamentally flawed, comparing a supposedly multimodal model primarily against GPT-3.5 (a text-only model) using only text-based ROUGE metrics. Despite citing MiniGPT-4, LLaVA, and mPLUG-Owl in the introduction, these more relevant multimodal baselines are absent from the evaluation.\n\nThe paper claims architectural innovations through NAS and GQA but provides no evidence these choices improve upon existing architectures like Llama that already use GQA. Testing is limited to a single institutional dataset, raising questions about generalizability." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "The authors state that all patient data has been desensitized to protect confidentiality and privacy; however, they do not provide further details or evidence to substantiate this claim. Hence, an ethical review is needed." }, "flag_for_ethics_review": { "value": [ "Yes, Privacy, security and safety" ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Comments: This paper represents valuable groundwork for healthcare applications in non-English languages, and I believe it addresses an important and necessary area. However, it currently lacks significant details that require attention.\nSuggestions:\nPlease address the points outlined in the Weakness section to enhance the paper's contribution. Specifically, expanding on evaluation metrics and experiments, ablation work to showcase the impact of MC, as well as expanding on the multi-turn approach, would greatly strengthen the technical contribution of this paper." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. This paper addresses a significant gap in non-English healthcare by introducing a multimodal orthopedic domain language model in Chinese.\n2. Introduced a novel MC approach that includes feedback from various experts in formalizing the final surgical plan.\n3. Incorporates multi-round discussion amongst medical professionals from different domains, thus aligning it closely with real-world medical consultations.\n4. Introduced a new dataset containing detailed categories of orthopedic surgery essential for future research in this domain." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces WenXinGPT, a multimodal LLM for orthopedic medical diagnoses in Chinese. This paper introduces a new dataset for orthopedic surgery and uses a Multi-Department Consultation framework to develop a comprehensive surgical plan." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Dataset details: Details on dataset size (number of tokens), high level statistical analysis, and dataset composition are lacking, including the specific datasets used and the proportions allocated for pretraining and fine-tuning (SFT).\n2. Evaluation Metrics: Evaluation relies solely on ROUGE scores, which is insufficient to capture essential aspects of medical report quality, such as interpretability and usability. Comparisons are limited to two other LLMs; additional comparisons to advanced models like Opus or GPT-4 would better contextualize the results. Results from GPT-based assessments also need to be included. The work will significantly benefit from human evaluations.\n3. Ablation Studies: The study needs an analysis of how NAS and MC strategies impact model performance, making the effectiveness of these approaches unclear.\n4. 
Implementation Details: Key implementation details, such as the prompts used for the MC framework and the tasks for supervised fine-tuning (SFT), are missing, which impacts reproducibility.\n5. Multi-Turn Dialogue: The multi-turn interaction mechanism needs to be clearly explained, and the example provided does not sufficiently illustrate how multi-turn discussions are initiated or maintained. \n6. Domain Focus and Generalizability: The choice to focus exclusively on orthopedics is not entirely justified, and there is limited discussion on the model’s adaptability to other medical specialties or non-Chinese datasets.\n7. Ethical Considerations: Information on handling Protected Health Information (PHI) in the dataset is incomplete, with no clear explanation of the PHI removal or validation techniques." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "No." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "I have strong concerns regarding the three contributions that the authors mentioned: \n1. The authors mentioned that the first contribution is to fill the gap of non-English-speaking healthcare LLMs. If so, why don't we just translate existing English-based LLMs to the target language? Would that lead to decreased performance? \n2. Does 'multi-round interactive dialogue system' refer to the multi-agent setup? This should be more like a 'joint expert consultation' process rather than a 'multi-round interactive dialogue'. How is the 'consensus among the interdisciplinary team' reached? \n3. How is the 'underlying prompt system' involved in this research? This part is missing." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The use of multiple agents. This design makes the generation of diagnoses more like a 'joint expert consultation' process, improving the outputs' robustness and interpretability.\n2. The generation of the dataset. The dataset mentioned in the paper, if publicly available, would be a good platform for future research." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces WenXinGPT, which incorporates multiple LLM agents on X-ray images for better clinical diagnosis. The idea of using multiple agents is interesting. However, there are major flaws that I will outline in more detail." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper is not well written. \n 1) For example, the authors mention in the contributions that they 'implement an underlying prompt system'. However, this part is missing in the paper.\n 2) The dataset is not clearly introduced. How many records are in this dataset? How many participants are in this dataset? \n 3) How are the D_consultations (mentioned in the Training Pipeline) acquired?
And how is the human feedback acquired for RLHF?\n\n2. Some of the contents are misleading. For example, the authors mentioned that they use a 'a 7-billion-parameter decoder-only LM', which turns out to be DeciLM-7B developed by others. Did the authors make modifications? Why don't citation the DeciLM-7B at the first time it appears? Did the authors develop GQA, NAS? Or just use the implementation the same as Deci? This needs to be clarified. \n\n3. The experimental design is not clear. For quantification evaluation (testing), which portion of data was used? How is the performance of BLEU scores? How are CoT, SC, and few-shot, zero-shot strategies implemented? Why just compare with GPT-3.5 and XtayGPT, instead of other general LLMs and medical LLMs? With Few-shot CoT + SC, the performance is better than WenXinGPT itself. How to further improve the performance of WenXinGPT? How is the 'consensus among the interdisciplinary team' reached in the example case?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024wenxingpt,\ntitle={WenXin{GPT}: A Multimodal Conversational Model for Enhancing Orthopedic Expert Consultations},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4bOCP1GtX4},\nnote={under review}\n}" }, "abstract": { "value": "Inspired by the hospital expert consultation model, this paper proposes a conversational medical visual language model for orthopedics, named WenXinGPT (Multi-disciplinary Collaboration). The core concept of this work focuses on aligning medical visual and textual representations to leverage high-quality data for generating expert consultation dialogues across hospital departments. The primary objective is to uncover orthopedic knowledge within medical intelligence models and enhance their reasoning abilities in an interpretable manner without requiring additional training. Our research particularly emphasizes zero-shot scenarios, and the results from experiments on 16 datasets provided by Peking Union Medical College Hospital demonstrate that the proposed WenXinGPT framework excels at mining and utilizing medical expertise within large language models, while also expanding their reasoning capabilities. Based on these findings, we conducted manual evaluations to identify and categorize common errors in our methods, along with ablation studies aimed at understanding the impact of various factors on overall performance." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Multimodal conversational model", "orthopedic expert consultations", "medical visual language model", "zero-shot scenarios", "large language models" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/835a84f224a29b1bcb5173e2afc423728e63bd36.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "WenXinGPT: A Multimodal Conversational Model for Enhancing Orthopedic Expert Consultations" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4cQVUNpPkt
FOLEYCRAFTER: BRING SILENT VIDEOS TO LIFE WITH LIFELIKE AND SYNCHRONIZED SOUNDS
main
Active
Diffusion Model;Audio Generation;Video to Audio Generation
generative models
3;6;6
4;1;4
2;3;2
2;3;2
1;3;3
5
3
2.333333
2.333333
2.333333
-0.5
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "I would appreciate the authors' response to my comments in \"Weaknesses\"." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. Although there is room for improvement in writing style, the paper itself is well-written enough to make readers understand their motivation, the proposed method, and the experimental results.\n2. The proposed video-to-audio model is well-designed to address the issue of synchronization between video and audio. There may be other designs for resolving the issue, but they conducted ablation studies to demonstrate that their designed model works well.\n3. The authors quantitatively evaluated their model on the commonly used benchmarks and qualitatively analyzed the audio signals generated from the proposed and previous models for comparison. These experimental results show that the proposed model outperforms the previous models." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a new video-to-audio model, featured by the semantic adapter and temporal adapter. The proposed model uses the [Auffusion](https://arxiv.org/abs/2401.01044) model as a baseline, and not only video-audio paired data but also text-audio paired data are used for training its sub-modules for connecting between the visual encoder and Auffusion. The temporal adapter, trained with the BCE loss or MSE loss to estimate the energy map of audio from video, enhances the synchronization between video and audio. The authors conducted both quantitative and qualitative comparisons with previous video-to-audio models to demonstrate that the proposed model outperforms them. They also conducted ablation studies to show that their proposed semantic and temporal adapters are effective." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- L.365: \"We employed several evaluation metrics to assess semantic alignment and audio quality, namely Mean KL Divergence (MKL) (Iashin and Rahtu, 2021), CLIP similarity, and Frechet Distance (FID) (Heusel et al., 2017), following the methodology of previous studies (Luo et al., 2023; Wang et al., 2024; Xing et al., 2024). MKL measures paired sample-level similarity\"\n - The application of FID to audio quality evaluations is proposed by [Iashin and Rahtu (2021)](https://www.bmvc2021-virtualconference.com/conference/papers/paper_1213.html), and [Luo et al. (2023)](https://proceedings.neurips.cc/paper_files/paper/2023/hash/98c50f47a37f63477c01558600dd225a-Abstract-Conference.html) followed them. However, [Wang et al. (2024)](https://ojs.aaai.org/index.php/AAAI/article/view/29475) and [Xing et al. 
(2024)](https://openaccess.thecvf.com/content/CVPR2024/html/Xing_Seeing_and_Hearing_Open-domain_Visual-Audio_Generation_with_Diffusion_Latent_Aligners_CVPR_2024_paper.html) use different metrics, FD ([Liu et al., 2023](https://proceedings.mlr.press/v202/liu23f.html)) and FAD ([Kilgour et al., 2019](https://www.isca-archive.org/interspeech_2019/kilgour19_interspeech.html)). I recommend the authors additionally evaluate their proposed model with these metrics for several reasons. The FID and FAD are calculated from spectrograms and do not consider phase information of audio signals. The FD is based on the PANN network ([Kong et al., 2020](https://ieeexplore.ieee.org/document/9229505)), which takes audio waveforms and achieves better performance in classification tasks than VGGish. Plus, recent papers use FAD or FD more frequently. The evaluation on these metrics will be informative to readers, which means the authors can contribute more to the community." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 1 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Refer to Weakness" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1.\tOriginality: This paper introduces an innovative framework, FoleyCrafter, which stands out in the field of sound generation for silent videos. By combining a pre-trained text-to-audio model with novel adapter designs (semantic and temporal adapters), it effectively addresses the limitations of existing methods in terms of audio quality and video synchronization, showcasing unique and original thinking.\n2.\tQuality: The paper demonstrates high research quality through comprehensive experimental design and implementation. It includes extensive quantitative and qualitative experiments, validating the effectiveness of FoleyCrafter on standard benchmark datasets. The results show that this method surpasses several state-of-the-art approaches in both audio quality and synchronization performance. Additionally, the availability of code and models facilitates future replication and research.\n3.\tClarity: The paper is well-structured, with clear explanations of concepts and model design, allowing readers to easily understand how FoleyCrafter operates. The figures and results in the experimental section are also well-presented, enabling readers to intuitively grasp the method’s performance and advantages.\n4.\tSignificance: FoleyCrafter holds substantial application potential in the field of video-to-audio generation. This approach not only enhances the realism and synchronization of sound effects but also offers controllability and diversity through text-based prompts. Such innovations have broad applicability in multimedia production, including film and gaming, and further advance cross-modal generation technology in the audio-visual domain." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces FoleyCrafter, a framework designed for automatically generating realistic and synchronized sound effects for silent videos. FoleyCrafter leverages a pre-trained text-to-audio model, incorporating a “semantic adapter” and “temporal adapter” to ensure that the generated audio is semantically aligned with video content and precisely synchronized over time. Additionally, it supports customizable audio generation through text prompts. The primary contributions include: 1) presenting a novel neural Foley framework for high-quality, video-aligned sound generation, 2) designing semantic and temporal adapters to improve audio-video alignment, and 3) achieving state-of-the-art performance on benchmarks through comprehensive quantitative and qualitative evaluations." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper’s originality appears limited. The whole model system exploits many present models, such as Freesound Project and Auffusion.\nAlthough the part of Quantitative Comparison includes evaluations in terms of semantic alignment, audio quality and temporal synchronization, the comparison of audio generation speed has not been expressed.\nThe lack of some ablation experiments for Semantic Adapter and Temporal Controller weakens persuasiveness. The Semantic Adapter could be entirely removed to observe the system’s performance without visual semantic information. The Onset Detector and Timestamp-Based Adapter could be individually removed to investigate their roles in temporal alignment and onset detection. In addition, it would be more persuasive if ablation experiments for Parallel Cross-Attention with different λ had been done." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "No" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "No" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1.Originality: The authors proposed two adapters to improve the audio synthesis. However, the structure inside originates from other works.\n2.Quality: Although the method proposed is effective compared to others, it lacks rigorous mathematical proof.\n3.Clarity: Semantic adapter has not been clarified clearly, especially the cross-attention component.\n4.Significance: The significance of the method is relatively high comparing to existing methods. However, parameters to be trained is relatively high compared to others." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors propose a framework called FoleyCrafter to synthesize high-quality audio guided by text prompts. It contains two key components:\n1. A semantic adapter that conditions audio generation on video features, making the output more semantically relevant.\n2. A temporal adapter that estimates time-varying signals to synchronize the generated audio with the video.\nThe authors carried out experiments on two datasets and achieved better performance than current strong models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Lack of Innovation: In this article, there are two key components. However, the semantic adapter is derived from the IP-adapter [1], while the temporal adapter originates from ControlNet [2]. This article lacks substantial original contributions.\n2. Inference Latency Concerns: In the articles mentioned above, the authors only add a single adapter to the original model. However, in this article, the proposed method includes two separate adapters, which may result in higher inference latency, potentially impeding efficiency and scalability.\n3. Insufficient Analysis of Text Prompts: In this article, there are text prompts and video prompts for audio generation. However, the authors provide only a qualitative description of the text prompt's capabilities, without comparing it to other models.\n\n[1] Hu Ye, Jun Zhang, Sibo Liu, Xiao Han, and Wei Yang. Ip-adapter: Text compatible image prompt adapter for text-to-image diffusion models. arXiv preprint arXiv:2308.06721, 2023.\n[2] Lvmin Zhang, Anyi Rao, and Maneesh Agrawala. Adding conditional control to text-to-image diffusion models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 3836–3847, 2023a." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024foleycrafter,\ntitle={{FOLEYCRAFTER}: {BRING} {SILENT} {VIDEOS} {TO} {LIFE} {WITH} {LIFELIKE} {AND} {SYNCHRONIZED} {SOUNDS}},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4cQVUNpPkt},\nnote={under review}\n}" }, "abstract": { "value": "We study Neural Foley, the automatic generation of high-quality sound effects\nsynchronizing with videos, enabling an immersive audio-visual experience. Despite\nits wide range of applications, existing approaches encounter limitations\nwhen it comes to simultaneously synthesizing high-quality and video-aligned\n(i.e., semantically relevant and temporally synchronized) sounds. To overcome these\nlimitations, we propose FoleyCrafter, a novel framework that leverages a pretrained\ntext-to-audio model to ensure high-quality audio generation. FoleyCrafter\ncomprises two key components: a semantic adapter for semantic alignment and a\ntemporal adapter for precise audio-video synchronization. The semantic adapter\nutilizes parallel cross-attention layers to condition audio generation on video features,\nproducing realistic sound effects that are semantically relevant to the visual\ncontent. Meanwhile, the temporal adapter estimates time-varying signals from\nthe videos and subsequently synchronizes audio generation with those estimates,\nleading to enhanced temporal alignment between audio and video. 
One notable\nadvantage of FoleyCrafter is its compatibility with text prompts, enabling the use\nof text descriptions to achieve controllable and diverse video-to-audio generation\naccording to user intents. We conduct extensive quantitative and qualitative experiments\non standard benchmarks to verify the effectiveness of FoleyCrafter. Models\nand codes will be available." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Diffusion Model", "Audio Generation", "Video to Audio Generation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/39ab2b4afda2473a995025c573d1f34e522636ef.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/64f4db8660402768cb4eaf0d76c80786b76db4de.zip" }, "title": { "value": "FOLEYCRAFTER: BRING SILENT VIDEOS TO LIFE WITH LIFELIKE AND SYNCHRONIZED SOUNDS" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4ciEeIiIJ7
Let’s disagree to agree: Evaluating collective disagreement among AI vision systems
main
Active
deep learning;representational similarity
applications to neuroscience & cognitive science
3;3;3;3;5;6
3;3;3;4;3;3
2;2;2;2;3;3
3;2;2;1;2;3
2;3;2;2;3;2
3.833333
3.166667
2.333333
2.166667
2.333333
-0.307148
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "- The quotes from Geirhos et al. are mainly about strategies. It is fair to say that the quote \"the consistency between CNNs and human observers, however, is little above what can be expected by chance alone\" and fig 1 in Geirhos et al. are about mistakes, not just strategies, and it does raise questions that we observe human-model consistency that seems driven by the image/label rather than random chance. However, that means your paper demonstrates the need for context in Geirhos et al. It does not mean that the consistency you show is nonobvious or a significant contribution - that's a separate question.\n\n- So, let's come to that question and your second response point above. You said something similar to reviewer vUbw - \"humans and machines are potentially challenged in the same way by the same images. We think this is not obvious at the population level because all the models were individually trained/fine-tuned on the same labels, so there's no ambiguity in their training, but ambiguity and disagreement nonetheless arises. And that ambiguity and disagreement appears to be aligned with populations of humans.\" And to me, \"Although the labels come from humans, the labels this model population sees are unanimous amongst all models. Therefore, we see that despite all models being trained to provide the same labeling over the training set, they disagree on held out images in a way that is similar to human populations.\"\n - You're right to say that the models were trained on the same labels for each image and that takes away one source of ambiguity. \n - However, my point is that when it comes to ambiguous images, you'll have groups of images in the training dataset that contain similar features (along with some different ones), but have different labels, and groups that have the same labels but different features (along with some similar ones). That is another source of ambiguity, so \"all the models were individually trained/fine-tuned on the same labels, so there's no ambiguity in their training\" seems false. \n - Not only is this a source of ambiguity, it's a well-known one. And not only is it well-known, I think it's the one driving your results. \n\nI like the idea of investigating populations and your approach to experimentation. I also think the paper is well-written and visualized. However, I don't think you've found something nonobvious yet, and would encourage you to keep investigating. I agree with reviewer vUbw that more careful interpretation of similarity is necessary. I'll maintain my score." }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": { "value": "Thanks for the comments - score not changed." 
}, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "We value your thorough review and we're responding as timely as possible because the points you have made are important and we would like to discuss further.\n\n> **It should not be surprising that both humans and machines have difficulty in correctly identifying the target class in these cases. But it is not justified to use this as a basis to say that machines and humans are making mistakes in the same kind of way - it is much more nuanced than that.**\n\nWe agree with this point. We are not saying that humans are making mistakes in the same kind of way, but humans and machines are potentially challenged in the same way by the same images. We think this is not obvious at the population level because all the models were individually trained/fine-tuned on the same labels, so there's no ambiguity in their training, but ambiguity and disagreement nonetheless arises. And that ambiguity and disagreement appears to be aligned with populations of humans.\n\n> **My first concern is related to the assumption from which the paper starts (L19) about the “ factors driving disagreement among AI systems are also causing misalignment between AI systems and humans perception” - why would that be the case?**\n\nWhile the actual notion of disagreement among a population of models has not been measured before our submission, it has been an explicitly stated assumption that the mistakes that AI models make are distinct from the mistakes that humans make. For instance, [Geirhos et al. (NeurIPS 2020)](https://arxiv.org/abs/2006.16736) make the points:\n\n\"The consistency between CNNs and human observers, however, is little above what can be expected by chance alone—indicating that humans and CNNs are likely implementing very different strategies.\"\n\n\"We conclude that there is a substantial algorithmic difference between human observers and the investigated sixteen CNNs: humans and CNNs are very likely implementing different strategies.\"\n\n“Cohen’s $\\kappa$ for CNN-human consistency is very low for both models (`.068` for ResNet-50; `066` for CORnetS) compared to `.331` for human-human consistency.”\n\nFurthermore, [Geirhos et al. 
(NeurIPS 2018)](https://arxiv.org/abs/1808.08750) make the point:\n\n“Additionally, we find progressively diverging patterns of classification errors between humans and DNNs with weaker signals.”" }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": { "value": "Discussion of some key concerns" }, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "We're responding to the reviews as timely as possible because the points you have made are important and we would like to discuss further." }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": { "value": "Also thank you for the review" }, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "> **Motivation isn't that convincing**\n\nWhile the actual notion of disagreement among a population of models has not been measured before our submission, it has been an explicitly stated assumption that the mistakes that AI models make are distinct from the mistakes that humans make. For instance, [Geirhos et al. (NeurIPS 2020)](https://arxiv.org/abs/2006.16736) make the points:\n\n\"The consistency between CNNs and human observers, however, is little above what can be expected by chance alone—indicating that humans and CNNs are likely implementing very different strategies.\"\n\n\"We conclude that there is a substantial algorithmic difference between human observers and the investigated sixteen CNNs: humans and CNNs are very likely implementing different strategies.\"\n\n“Cohen’s $\\kappa$ for CNN-human consistency is very low for both models (`.068` for ResNet-50; `066` for CORnetS) compared to `.331` for human-human consistency.”\n\nFurthermore, [Geirhos et al. 
(NeurIPS 2018)](https://arxiv.org/abs/1808.08750) make the point:\n\n“Additionally, we find progressively diverging patterns of classification errors between humans and DNNs with weaker signals.”\n\n> **the training data is also a product of human reaction to ambiguity**\n\nAlthough the labels come from humans, the labels this model population sees are unanimous amongst all models. Therefore, we see that despite all models being trained to provide the same labeling over the training set, they disagree on held out images in a way that is similar to human populations." }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": { "value": "Discussion of some key concerns" }, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "N/A" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- It is unclear why the authors concluded from Figure 1 alone that the stimuli causing the most agreement/disagreement among AI systems also cause the most agreement/disagreement among humans. Although the figure shows the agreement levels, it lacks specific information on the stimuli that contributed to such obtained outcomes\n- In Table 1, what is the motivation behind comparing the models agreement with the human viewing time and the difficulty score?\n- It is unclear why the authors concluded from Table 1 that ObjectNet is more challenging for both humans and the models?\n- I would recommend to provide a correlation measure for Figure 5.\n- Do you expect any bias in human annotations?\n- In Figure 6, How did you determine the visual factors for the models?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The comparison between model performance and human annotations is interesting and insightful." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper compares the collective behaviour of 1,032 AI vision systems with 42 humans in annotating images, investigating how various visual factors influence agreement levels. It highlights that images that are challenging for the AI systems often pose similar difficulties for humans. The paper suggests that there is an alignment in visual complexity across both groups. 
The study quantifies (dis)agreement among AI systems and compares the results with human annotations. Additional factors such as difficulty score, minimum viewing time, and specific visual properties are examined. This approach offers insights into common challenges shared by AI and human perception." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper is difficult to follow\n- The motivation and contributions of the paper is not clear\n- The paper lacks novelty, as it mainly consists of a comparison between the performance of machine learning models and human annotators. Reader may expect a novel methodology to be derived from these analyses.\n- The paper lacks a discussion about the limitations and potential directions for future work" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "In light of the previous comments, I think the main actionable points are:\n- the motivation of the paper needs to be reconsidered and clarified\n- so does the conclusion and interpretation of results, in particular, I would recommend more carefully interpreting the similarities between humans and artificial models.\n\nFurther clarification is also needed on:\n- Figure 1 - the interpretation of the histograms for model and human agreement (“histograms along each axis reflect the proportion of images at each marginal agreement level”). The caption states there is a positive correlation but does not state how this conclusion is reached. Later on, Table 1 provides some values but the exact method for reaching those values is missing. Visually the histograms do not seem positively correlated, but again clarifying in text would be better.\n\n- Details of the pretraining of each model, or at least grouped per family of models (maybe grouped by architecture type) used in this analysis would have been relevant. Also, further discussion and interpretation of results, again grouped per family of models could have added value to this paper. For example, how do different model architectures contribute to the level of disagreement? \n\n- Again, for clarity, it would be good to state clearly how the values for correlation between model agreement and the human behavioural measures (Table 1) are computed. \n\n- Line 432 - What is this subset of selected models? Based on what criteria were these models selected? \n\n- Regarding low-agreement images, it would be interesting to assess the factors that cause disagreement at certain levels of accuracy. Are these factors maintained, and what factors remain/are discarded as the acceleration of agreement occurs (as per L440-442)?\n\nFinally, I think a section on the limitations of this study should be included. 
For example:\n- the limited number of human participants might not reflect the full spectrum of human visual perception\n- how does approximating perceptual abilities to population disagreement lead to overlooking specific, individual visual factors?\n- is Fleiss’ Kappa the most suitable measure and are there any other agreement measures that could be explored instead?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper has the following (potentially) strong points: \n\n1. The paper assesses the overlap between AI vision models and human disagreement on a collective/population level, rather than an individual level. This is an original approach as far as I know. The assumption is that by identifying patterns in how populations of AI models fail similarly to humans, training methods or architectures that handle difficult stimuli could be developed, and thus improve model robustness and interpretability. The proposed many-to-many comparison is something worth considering in the future, alongside already-established measures.\n\n2. This study models the largest population (afaik) of artificial vision models, spanning 1032 AI models with various architectures, pretraining regimes and data. Such a population should provide a comprehensive view of collective disagreement. However, how each of these models influences the collective disagreement is not discussed enough, but could have been a point to add more value to the paper.\n\n3. It aims to uncover and highlight common factors between humans and artificial models of vision that cause difficulty in object recognition." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper assesses the disagreement among a population of artificial vision systems (1032 models) and compares it with the disagreement among a population of humans (42 human participants). Unlike previous works, populations of agents and humans are compared on a collective level, instead of an individual level. The paper aims to prove that factors that cause disagreement among AI systems coincide with the factors that cause human disagreement, at a population level." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "This work presents the following weaknesses:\n\n1. My first concern is related to the assumption from which the paper starts (L19) about the “ factors driving disagreement among AI systems are also causing misalignment between AI systems and humans perception” - why would that be the case? It states that the current study challenges (L484) “the assumption present in prior work that disagreement among AI systems is unrelated to human visual processing”. But this assumption (L484) is not adequately founded, or at least not supported through the references provided which do not claim that disagreement between artificial models is unrelated to human visual processing. To reinforce, the initial assumption is not adequately discussed or supported by the correct references making it difficult to understand the motivation of the paper in the first place. \n\n\n2. 
For a study comparing human and artificial visual systems, the authors might want to consider the body of literature that draws from neuroscience to better understand how convolutional neural networks (CNNs) could model early visual processing pathways [e.g. A Unified Theory of Early Visual Representations from Retina to Cortex (Lindsey et al., 2019); Spatial and Colour Opponency in Anatomically Constrained Deep Networks (Harris et al. , 2019)]. Such works aim to understand the similarities between human visual systems and artificial models at the lower level of neurons and how the functional and structural layouts of biological visual systems could better inform DNN architectures.\n\n3. While the idea of comparing many to many is interesting and could add value on top of accuracy and one-to-one error consistency measures, the experimental setup seems to be (visually) ill-posed. For instance, the challenging examples are complex scenes, e.g. Figure 12, in which the label corresponds to just one small part of the scene. It should not be surprising that both humans and machines have difficulty in correctly identifying the target class in these cases. But it is not justified to use this as a basis to say that machines and humans are making mistakes in the same kind of way - it is much more nuanced than that. \n\n4. While the assessment in Fig 6 aims to show the proportion of human-annotated top visual attributes, it is unclear on an instance level how and why humans and artificial models reach (dis)agreement. Take for example the cases where the model makes random kinds of predictions humans clearly would not. For example, Figure 3c is clearly not a roof tile, a scorpion, or a sandal - no human would guess any of those, although they could still be wrong of course." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- I am curious how these experiments would fare for top-5 classification - possibly for humans, not just models \n- In figure 6, how should we factor in the difference in proportions between models and humans, even if the order of proportions is mostly the same? I realize you're not making this claim, but if we want to establish similar underlying mechanisms, we'd need to deal with the differences in proportion for each factor. What might this imply for future studies? \n- \"Images at low agreement levels are produce significantly lower Fleiss' $\\kappa$ than high agreement and all images, even for models at high performance levels\" - I thought that agreement is *defined* as Fleiss' $\\kappa$. Am I misinterpreting? Is the point that even when models are split and Fleiss' $\\kappa$ is recalculated, it is low for the images that had low Fleiss' $\\kappa$ across all models? That would be more meaningful, though continues to point to images that are simply ambiguous." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "### Quality \n- Good problem setup: well-defined, statistical choices make sense, and experiences overall make sense (I will list a couple exceptions in the weaknesses) \n- Good application of ImageNet-X to get systematic error analysis on naturalistic images \n- Comparing to a population of models seems promising\n\n### Clarity\n- Writing style is very clear. I rarely felt confused when reading the paper, and the structure made sense.\n- Figures are well-designed. They are the most useful aspect for building intuition about the results - they look good, and show the right concepts. \n- Explanation of Fleiss' $\\kappa$ helps build intuition for what \"agreement\" means, and also helps strengthen the experimental design choices" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper attempts to establish similarity between artificial and biological vision by showing that populations of AI models and populations of humans show intra-group disagreement on the same stimuli. It motivates itself by claiming that prior work shows disagreement among models being a function of limitations in their development, rather than expressions of an underlying mechanism in both AI and human vision. \n\nThe paper defines agreement as Fleiss' $\\kappa$ for an image, calculated over a population of vision systems. It surveys ~40 humans and ~1000 models, trying CNNs, ViTs, and hybrids and varying model size, dataset size, and training methods (pretraining and finetuning). It also uses human minimum viewing time and difficulty score as comparison metrics. \n\nResults show:\n- All metrics appear to correlate with model agreement in intuitive ways - not strong correlations, but significant and all in the intuitive direction\n- The clearest relationship is for low-difficulty high-model agreement images \nThe paper takes human-annotated visual attributes from the ImageNet-X dataset, in which humans annotated what aspects of an image make it difficult to classify. The paper showed that for both low-human agreement and low-model agreement images, the percent of images with each top difficulty factor shows similar relative influence - the percentage of images for each factor decreases in mostly the same order for both humans and models. The most influential factors are found to be background, pose, color, pattern, and \"smaller\". \n\nThe paper also shows that model agreement increases as accuracy increases. \n\nThe paper then positions itself against other error analysis-related works, works that use synthetic stimuli to assess differences, and metamers (this being an opposite of a metamer)." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "### Quality\n#### Problem\n- Motivation isn't that convincing - the paper claims that the typical assumption around model errors is \"intrinsic to these systems and unrelated to aspects of human visual processing.\" But that isn't always the case - I think ambiguous images (which seem to be the crux of this paper) are not only known to be difficult for models just as they are difficult for humans, but are easily cited by most researchers as a cause of model error and likely disagreement\n - The paper also claims evidence that \"disagreement among AI vision systems is driven by aspects of human visual perception, particularly image difficulty\" - it's worth nothing that classifications are a human concept, not an inherent property of the image, and training data reflects that. Maybe the paper isn't directly making this claim, but it seems that it's suggesting there are similar mechanisms between models (at least model populations) and humans that drive disagreement; I'd argue that these images are simply actually ambiguous, the classification is a product of human reaction to ambiguity, the training data is also a product of human reaction to ambiguity, and the model directly encodes that rather than showing an interesting emergent behavior. \n- Data on variations of models is limited to a list in the appendix - would be good to be given a structured representation of the variations in a table\n\n#### Results\n- Though the correlation coefficients are nontrivial and the figures line up with them, and I wouldn't expect strong correlations for such a high-dimensional problem, the figures do show a lot of spread. \n- This also make the results seem less surprising - from both this and figure 6, where we see the factors being \"background\",\"pose\", \"color\", \"pattern\", and \"smaller\", it seems that the difficult images are simply truly ambiguous. It's not a matter of ML fallibility, but I wouldn't expect it to be. It's also not an underlying surprising mechanism in human vision that makes humans fallible on them. The images are ambiguous and the humans who labeled them probably weren't completely sure what to label them. Even if we call it a shared mechanism/underlying principle of human vision, it's not surprising or unknown. \n- It makes sense that agreement increases as overall accuracy increases, but this is really not surprising. It could be that there are cases where models all classify the image as the same wrong class, but just given how training works, it's likely the original image is misclassified (or the original assumption is true). In either case, this doesn't offer an alternative to an explanation to the original assumption. \n\n### Clarity\n- Would help to have an explanation of why Fleiss' $\\kappa$ is a good measure of agreement, really just intuition on how it works. \n- Sections 3.1 and 3.2 don't need to be there - they explain concepts that are immediately clear from the figures. \n- More descriptive statistics on the figures would help understand how predictive the results are. \n\n### Originality and significance\n- I haven't seen this framing of this problem. However, the concept itself - that ambiguous images are difficult for both humans and models - doesn't seem novel. It also doesn't seem to warrant this much formalization." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "### Quality\n#### Problem\n- Motivation isn't that convincing - the paper claims that the typical assumption around model errors is \"intrinsic to these systems and unrelated to aspects of human visual processing.\" But that isn't always the case - I think ambiguous images (which seem to be the crux of this paper) are not only known to be difficult for models just as they are difficult for humans, but are easily cited by most researchers as a cause of model error and likely disagreement\n - The paper also claims evidence that \"disagreement among AI vision systems is driven by aspects of human visual perception, particularly image difficulty\" - it's worth noting that classifications are a human concept, not an inherent property of the image, and training data reflects that. Maybe the paper isn't directly making this claim, but it seems that it's suggesting there are similar mechanisms between models (at least model populations) and humans that drive disagreement; I'd argue that these images are simply actually ambiguous, the classification is a product of human reaction to ambiguity, the training data is also a product of human reaction to ambiguity, and the model directly encodes that rather than showing an interesting emergent behavior. \n- Data on variations of models is limited to a list in the appendix - it would be good to be given a structured representation of the variations in a table\n\n#### Results\n- Though the correlation coefficients are nontrivial and the figures line up with them, and I wouldn't expect strong correlations for such a high-dimensional problem, the figures do show a lot of spread. \n- This also makes the results seem less surprising - from both this and figure 6, where we see the factors being \"background\", \"pose\", \"color\", \"pattern\", and \"smaller\", it seems that the difficult images are simply truly ambiguous. It's not a matter of ML fallibility, but I wouldn't expect it to be. It's also not an underlying surprising mechanism in human vision that makes humans fallible on them. The images are ambiguous and the humans who labeled them probably weren't completely sure what to label them. Even if we call it a shared mechanism/underlying principle of human vision, it's not surprising or unknown. \n- It makes sense that agreement increases as overall accuracy increases, but this is really not surprising. It could be that there are cases where models all classify the image as the same wrong class, but just given how training works, it's likely the original image is misclassified (or the original assumption is true). In either case, this doesn't offer an alternative explanation to the original assumption. \n\n### Clarity\n- It would help to have an explanation of why Fleiss' $\\kappa$ is a good measure of agreement, really just intuition on how it works. \n- Sections 3.1 and 3.2 don't need to be there - they explain concepts that are immediately clear from the figures. \n- More descriptive statistics on the figures would help understand how predictive the results are. \n\n### Originality and significance\n- I haven't seen this framing of this problem. However, the concept itself - that ambiguous images are difficult for both humans and models - doesn't seem novel. It also doesn't seem to warrant this much formalization."
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "The paper states: \"AI systems might be more sensitive to background variations than humans and human population are more likely to disagree when pattern variations are present\". Explain what \"pattern\" refers to here.\n\nWhen giving models' accuracy on ImageNet and ObjectNet datasets, are you using top-5 or top-1 accuracy? What about for humans?\n\nFigure 7: What is \"Bin Mean Accuracy\"?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The experiments seem solid and the results are well-presented. The authors tested over 1,000 different models, including CNNs, ViTs, and hybrid models. The paper goes more deeply than just giving correlation statistics, and investigates what features low-agreement images have in common." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper investigates correlations between populations of humans and object-recognition systems on object-classification disagreements. The results show that there is significant correlation between human and model population disagreements, as well as between human minimum viewing time and model disagreements. The results support the hypothesis that this correlation is driven by aspects of human visual perception that makes certain aspects of images difficult to classify." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I'm not sure how useful these results are, either for understanding human or machine vision, or for improving machine vision systems. A useful result would point in a new direction for experiments (to better understand underlying mechanisms) and/or architectural improvements. But what are the next steps with these results? The authors did not address this or make the case that these results are important for the field. \n\nThe paper states: \"In this work, we challenge the assumption that disagreement among AI systems is intrinsic to these systems and unrelated to aspects of human visual processing\". But what are the citations for this assumption? \n\nI didn't understand, in second paragraph, how this assumption \"aligns with standard approachs for comparing internal representations of AI and biological vision, such as representational similarity analysis\" or how it is \"explicit in behavioral extrapolation tests\" -- this needs better explanation." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1.Did the authors consider analyzing cases where model agreement is high but human difficulty is low, or where model agreement is low but human difficulty is high? Such cases might offer valuable insights into the nuanced differences between AI model behavior and human perception.\n2.Although multiple architectures were included, why did the authors not explore the impact of different architectures on the experimental results?\n3.Can the higher disagreement on challenging human images be reduced through specific adjustments to models or training datasets?\n4.Previous research has shown links between AI-human disagreement and human visual processing at the individual model level. Why were these relevant studies not carefully discussed in the related work section?\n\nIf the authors can address these issues, I would be happy to raise my score." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1.Innovative Research Topic:\nThe authors investigate an intriguing and novel research area by examining AI model and human visual disagreements at a population level. This approach is unique in that it moves beyond individual model comparisons to analyze the collective behavior of AI vision systems.\n2.New Method for Measuring Human-AI Discrepancy:\nBy introducing a method to measure disagreement at the population level, the study provides a new way to quantify the difference between AI models and human perception, adding a meaningful metric to the field.\n3.Focus on Naturalistic Stimuli:\nUnlike prior work that often uses synthetic stimuli, this study investigates the properties of naturalistic stimuli that elicit the most disagreement among AI models, making its findings more applicable to real-world scenarios.\n4.Insights into AI-Human Perceptual Alignment:\nThe article provides evidence suggesting that disagreements among AI systems are influenced by aspects of human visual perception, particularly in image difficulty, as measured by human behavioral data. This insight supports the idea that individual differences in AI vision systems may reflect differences in human visual processing rather than inherent AI limitations." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The article explores the disagreement behaviors of AI vision systems, diverging from traditional approaches that compare individual AI models to biological vision. Instead, this study investigates patterns of agreement and disagreement among a diverse population of AI models by measuring \"aggregate disagreement\" across model outputs. It aims to determine which inputs produce the most divergent responses among models and assesses whether these inputs also create discrepancies between AI systems and human perception.\nA significant finding is that even images causing high disagreement among AI models often align with human perceptual challenges. This alignment suggests that the limitations in AI models mirror similar perceptual difficulties in humans, offering valuable insights into AI-human vision comparisons at a population level. 
This work contributes to the field by reframing disagreement not as an intrinsic limitation of AI systems but as an opportunity to study the shared perceptual challenges between artificial and human vision systems." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.Limited Analysis of Outlier Cases:\nThe authors report correlations between model agreement and human behavioral measures, but they do not analyze specific cases where model agreement is high but human difficulty is low, or vice versa. Such an analysis could provide deeper insights into unique points of divergence.\n2.Lack of Architecture-Specific Insights:\nAlthough multiple model architectures are included in the study, the authors do not analyze how different architectures impact the results. This oversight limits the understanding of how architectural variations might contribute to AI-human agreement or disagreement on challenging stimuli.\n3.No Exploration of Methods to Reduce Disagreement:\nWhile the study highlights greater disagreement on images of higher human difficulty, it does not explore whether certain methods, such as targeted model adjustments or expanded training datasets, could reduce this disagreement and improve alignment with human perception.\n4.Insufficient Citations of Related Work on AI-Human Disagreement:\nPrior research has shown that there are links between AI-human disagreement and human visual processing at the individual model level, yet the authors do not reference these foundational works. Including these citations could strengthen their arguments by situating the study within the existing body of research." }, "withdrawal_confirmation": null } ]
4dAgG8ma3B
Chemistry-Inspired Diffusion with Non-Differentiable Guidance
main
Active
guided diffusion;ai4science;molecule generation
applications to physical sciences (physics, chemistry, biology, etc.)
3;5;5;8
5;3;4;4
2;2;3;4
1;2;2;3
2;2;3;4
5.25
4
2.75
2
2.75
-0.396059
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please see Weaknesses" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. CHEMGUIDE’s use of quantum chemistry as a non-differentiable oracle in conditional diffusion models is meaningful.\n2. The paper reports improvements in stability and force metrics over baselines.\n3. Implementing zeroth-order optimization in a diffusion context is well-justified." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes CHEMGUIDE, an approach that uses non-differentiable guidance for the conditional generation of molecular structures in diffusion models. CHEMGUIDE uses quantum chemistry oracles as guidance, providing gradient signals for the sampling distribution. The method applies zeroth-order optimization to enhance molecule stability by minimizing atomic forces. Experiments demonstrate CHEMGUIDE’s effectiveness on two datasets." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper could enhance its rigor by comparing CHEMGUIDE with more baseline models, such as MolGAN, and more importantly, other existing guidance methods. The current comparisons seem limited. It would also be more comprehensive to experiment with additional datasets.\n2. While GFN2-xTB is a reasonable compromise, comparing CHEMGUIDE results against high-accuracy methods like DFT more extensively could help validate the chemical accuracy of generated molecules.\n3. The paper lacks a thorough discussion on the limitations of using a non-differentiable oracle, such as the potential difficulty in handling certain molecular configurations or diverse chemical spaces. \n4. The use of the GFN2-xTB method and bilevel optimization adds computational complexity, which could restrict practical usage. In addition, the guidance scale parameter lacks an adaptive mechanism. Exploring automated scale scheduling would improve usability.\n5. Code is not provided." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Is there a typo in the numerator in Eq. 15 when approximating the gradient? 
It should say $\frac{\mathcal{F}[z_{x,t} + c U, z_{h,t}] - \mathcal{F}[z_{x,t} - c U, z_{h,t}]}{2c}$.\n\nIn Algorithm 1, the approximated gradient $g_{t-1}$ has a dependency on the state at time $t=0$. Is this a typo, since Eq. 15 does not refer to the clean data prediction?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The usage of approximate gradients from non-differentiable oracles in diffusion guidance for molecule design is novel and interesting.\nTo evaluate their proposed method, the authors run a suite of experiments showing that, on the two common benchmarks, the samples generated with their sampling algorithm have improved evaluation metrics compared to the baselines." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose CHEMGUIDE, a sampling algorithm to guide pre-trained (latent) diffusion models for molecule design with the goal of optimizing for stable molecules, indicated by smaller force norms when evaluated using the xTB oracle functions. As the xTB oracle function, which outputs the forces per atom in a molecule, is non-differentiable, the authors make use of a known gradient approximation from random perturbation theory to approximate the gradients suitable for guidance during the diffusion sampling trajectory. The authors also suggest how their non-differentiable guidance can be combined with neural regressors as commonly done in the diffusion models literature. The authors show that their non-differentiable guidance, when applied to GeoLDM, leads to generated molecules with lower force norms compared to the samples when GeoLDM is used without the proposed guidance, indicating that their method works for the models trained on common benchmark datasets such as QM9 and GEOM-Drugs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The guidance component from the non-differentiable oracle shows small improvements on the QM9 and GEOM-Drugs datasets. While the idea is interesting, a stronger baseline to compare against is to take the samples generated by the baseline GeoLDM and perform a relaxation using xTB. As the authors mention in their appendix A - Implementation Details, the sampling time for 100 molecules is quite slow at 6 hours and 18 minutes if they perform their proposed guidance in the last 400 diffusion timesteps using xTB as oracle.\nHow does the GeoLDM baseline (right column) in Tables 1 and 2 compare if xTB relaxation is run for a pre-defined number of steps?\n\nFurthermore, I found it hard to read the paper as Section 3 Methodology contains sections 3.1 and 3.2 which are not the author's contribution but already existing methods. I would move these to Section 2, within the preliminaries." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "N/A" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Most of my questions are about experiments, I feel the current experimental comparisons are too weak (only comparing with unconditional generation), see weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "* This paper studies an important and timely problem: how to move beyond generative models (which randomly sample from learned distributions) toward efficient search and optimization over the space with guidance is a pressing question in molecular generation. \n* Derivative-free guidance is very well-motivated and I agree that it is of great importance to problems in the real-world molecular discovery process." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies how to achieve derivative-free molecular optimization with diffusion models. The main motivation for this work is that many real-world molecular properties are sophisticated and can only be evaluated through expensive experiments or non-differentiable simulations. In this paper, a zeroth-order optimization method is constructed by perturbing the input molecular conformation and measuring the effect on the molecular properties. The effectiveness of the proposed methods is validated on a set of quantum mechanical properties for small molecules." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I have several concerns about this paper, mostly coming from the claim, related work, and experiments. \n\n* First of all, the idea of derivative-free guidance is not new: in molecular optimization, evolutionary algorithms have been used [1], twisted sequential Monte Carlo for protein design [2], twisted SMC for large language model inference [3], and a stochastic optimal control method for music generation [4]. I believe the claim that this work is the first of its kind is inappropriate, both for derivative-free guidance in general and for molecular design.\n\n* Given this is not new, the related work section in the Appendix only discusses the general molecule generation literature and part of the guided diffusion model literature, but misses the critical relevant literature both in molecular generation and other domains.\n\n* The experimental results are weak. Even if I agree with the general motivation of derivative-free guidance, (1) there are works such as simple evolutionary algorithms [1] and twisted SMC [2] available for comparison; even if you do not want to compare against them, you need to compare with a gradient-based method --- if you think about the experiment budget, you can always construct a classifier from the samples you have evaluated, e.g. a trained neural network. Although such a classifier may not generalize OOD and may perform badly, you may still include it as a baseline. For more potential baselines to compare against, you can check this benchmark [5].\n\n[1] Schneuing, A., Du, Y., Harris, C., Jamasb, A., Igashov, I., Du, W., Blundell, T., Lió, P., Gomes, C., Welling, M. and Bronstein, M., 2022. Structure-based drug design with equivariant diffusion models. arXiv preprint arXiv:2210.13695.\n\n[2] Wu, L., Trippe, B., Naesseth, C., Blei, D. 
and Cunningham, J.P., 2024. Practical and asymptotically exact conditional sampling in diffusion models. Advances in Neural Information Processing Systems, 36.\n\n[3] Zhao, S., Brekelmans, R., Makhzani, A. and Grosse, R.B., Probabilistic Inference in Language Models via Twisted Sequential Monte Carlo. In Forty-first International Conference on Machine Learning.\n\n[4] Huang, Y., Ghatare, A., Liu, Y., Hu, Z., Zhang, Q., Sastry, C.S., Gururani, S., Oore, S. and Yue, Y., Symbolic Music Generation with Non-Differentiable Rule Guided Diffusion. In Forty-first International Conference on Machine Learning, 2024.\n\n[5] https://github.com/brandontrabucco/design-bench" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "**clarifications**\n- Can you clarify which aspects of your method implement SPSA? Equation 15 combines two ideas: 1) introducing random perturbations for atom coordinates from a standard normal distribution, and 2) using these perturbations in a finite-difference approximation of the gradient. It would be helpful to explicitly state which of these constitutes SPSA and how it differs from standard finite differences.\n\n- What are the theoretical requirements for SPSA convergence? The paper mentions continuity of z as a requirement, but are there other conditions (e.g., smoothness, bounded variance) needed for the gradient estimates to be reliable?\n\n**suggestions**\n- Given the extensive appendix (sections A-K), adding a table of contents at its beginning would improve navigation.\n\n- Consider adding a brief discussion of computational overhead introduced by SPSA compared to standard finite differences.\n\n**nitpicks**\n- Stability' is unnecessarily capitalized on page 6, section 4.2" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- **Strong and relevant contribution**: The idea proposed in the paper has clearly relevant applications in a number of domains (i.e. any field with expensive oracles) and naturally extends existing efforts in the field of diffusion models. \n- **Novelty**: The method is conceptually simple yet novel, opening the door for various applications which could benefit from the guidance of a non-differentiable oracle.\n- **Thorough empirical evaluation**: The paper presents thorough empirical analysis, effectively demonstrating both the strengths and limitations of the proposed method. 
The experiments span multiple datasets (QM9 and GEOM), various molecular properties, and different guidance approaches (explicit, implicit, and combined).\n- **Extensive analysis**: the empirical observations are grounded in real-world chemistry insights, with careful analysis of failure cases and performance trade-offs.\n- **Clarity of presentation**: the paper is well-written and includes many relevant and well-designed figures." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose ChemGuide, a method for estimating diffusion guidance (i.e. gradients) from a non-differentiable property predictor. The goal is to eliminate the need for labeled training data typically required for property prediction networks. They demonstrate their approach in the context of 3D molecular generation. ChemGuide enables an unconditional diffusion model to generate more stable 3D molecular structures by incorporating guidance from quantum chemistry calculations (GFN2-xTB) that serve as a non-differentiable oracle." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- **Justification of the zeroth-order optimization and comparison to other non-gradient optimization methods**: The paper lacks clear justification for choosing SPSA as the zeroth-order optimization method. Section 3.2 would benefit from a discussion of alternative approaches (e.g., finite differences, evolution strategies) and justification for their specific choice in terms of computational efficiency and accuracy trade-offs, and suitability for this particular application of guiding molecular diffusion models. A short pragaraph would suffice here.\n\n- **Assessing the quality of the gradients obtained with ChemGuide vs a differentiable regressor** While the paper shows final performance metrics, it lacks direct analysis comparing the gradients estimated via zeroth-order optimization to those from a differentiable regressor. Such comparison could provide insights into how reliable are CHEMGUIDE's estimated gradients compared to differentiated gradients. For example, the authors could plot the cosine similarity between CHEMGUIDE's estimated gradients and those from a differentiable regressor across different timesteps of the diffusion process\n\n- **Explain which guidance method is suitable for which property**: The authors observe that noisy and clean guidance methods appear complementary, with properties poorly optimized by one often well-optimized by another (e.g., α vs ∆ϵ). However, the paper would be more practically useful if it provided explanations for these differences, helping practitioners choose the appropriate method for their specific use case." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024chemistryinspired,\ntitle={Chemistry-Inspired Diffusion with Non-Differentiable Guidance},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4dAgG8ma3B},\nnote={under review}\n}" }, "abstract": { "value": "Recent advances in diffusion models have shown remarkable potential in the conditional generation of novel molecules. These models can be guided in two ways: (i) explicitly, through additional features representing the condition, or (ii) implicitly, using a property predictor. 
However, training property predictors in conditional diffusion models requires an abundance of labeled data and is inherently challenging in real-world applications. We propose a novel approach that attenuates the limitations of acquiring large labeled datasets by leveraging domain knowledge from quantum chemistry as a non-differentiable oracle to guide an unconditional diffusion model. Instead of relying on neural networks, the oracle provides accurate guidance in the form of estimated gradients, allowing the diffusion process to sample from a conditional distribution specified by quantum chemistry. We show that this results in more precise conditional generation of novel and stable molecular structures. Our experiments demonstrate that our method: (1) significantly reduces atomic forces, enhancing the validity of generated molecules when used for stability optimization; (2) is compatible with both explicit and implicit guidance in diffusion models, enabling joint optimization of molecular properties and stability; and (3) generalizes effectively to molecular optimization tasks beyond stability optimization." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "guided diffusion", "ai4science", "molecule generation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/1a97f1e27792fcc4848c6b15c9994c72645e327e.pdf" }, "presentation": null, "primary_area": { "value": "applications to physical sciences (physics, chemistry, biology, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Chemistry-Inspired Diffusion with Non-Differentiable Guidance" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
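The ChemGuide reviews above repeatedly refer to a zeroth-order (SPSA-style) gradient estimate built from the central difference $\frac{\mathcal{F}[z + cU] - \mathcal{F}[z - cU]}{2c}$ with a random perturbation $U$. The sketch below illustrates such an estimator in generic form; the `oracle` callable, the array shapes, and the choice to average over several perturbations are assumptions for illustration, not the paper's Eq. 15 or its xTB interface.

```python
import numpy as np

def zeroth_order_gradient(oracle, z, c=1e-3, n_samples=8, rng=None):
    """Gradient-free estimate of d oracle(z) / d z via random central differences.

    oracle : callable mapping an array z (e.g., atom coordinates) to a scalar
             property such as a force norm; assumed non-differentiable.
    c      : perturbation scale; n_samples trades variance against oracle calls.
    """
    rng = np.random.default_rng() if rng is None else rng
    grad = np.zeros_like(z, dtype=float)
    for _ in range(n_samples):
        u = rng.standard_normal(z.shape)              # random direction U
        diff = oracle(z + c * u) - oracle(z - c * u)  # F[z + cU] - F[z - cU]
        grad += (diff / (2.0 * c)) * u                # central difference along U
    return grad / n_samples
```

During guided sampling, an estimate of this kind would be plugged in wherever classifier guidance normally uses an analytic gradient; each perturbation costs two oracle evaluations per diffusion step, which is consistent with the long sampling times the first reviewer highlights.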
4dAhjhm2Mm
A Score-Based Density Formula, with Applications in Diffusion Generative Models
main
Active
score-based density formula;score-based generative model;evidence lower bound;denoising diffusion probabilistic model
learning theory
3;3;3;3
4;3;3;4
2;3;4;2
2;2;2;1
1;2;2;2
3
3.5
2.75
1.75
1.75
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please refer to weaknesses section. Also, why do the authors consider the particular SDE in lines 76-77 and not a more generic one?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper focuses on the theoretical understanding of score-based generative models (SGMs) by applying a density formula to explain why optimizing the evidence lower bound (ELBO) effectively supports training for diffusion models like DDPMs. By investigating the theoretical aspects behind ELBO optimization, the authors promote a more rigorous basis for diffusion models.\n\nAdditionally, the paper extends the implications of this analysis to areas such as GAN regularization, diffusion classifiers, and autoregressive models, illustrating the potential for these findings to enhance model training practices across various generative frameworks" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper considers a density formula based on score estimation to analyze Denoising Diffusion Probabilistic Models (DDPMs). Using this formula, the authors provide a theoretical basis for why optimizing the evidence lower bound (ELBO) serves as an effective approach for training these models. The paper addresses the problem of the understanding of ELBO optimization for diffusion models, adding theoretical context to a widely used empirical technique.\n\nThe analysis extends to practical implications across different generative modeling contexts, including applications to GAN regularization, diffusion classifiers, and autoregressive models. By investigating these areas, the authors demonstrate how insights from the density formula can support training and optimization practices in various generative frameworks. This broad applicability suggests that the theoretical findings may be interesting to both foundational research and practical applications in generative modeling." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The biggest weakness of the work lies in the lack of novelty and the positioning with respect to the literature. \nIn particular, it is not evident how the work differs from known results such as [1], where Thm 4+ eq (25) and the comment in eq(29) seems to provide a result which is even more general than the one discussed by the authors. It is worth mentioning that related results which the authors do not cite in their work are also presented in [2], in particular Thm 3.\n\n\nOne other big limitation is that the authors derive their connection between continuous and discrete time (with an **approximated score**) by a sequence of approximations, without discussing properly the impact of these. 
A clear quantitative analysis of their impact would greatly strengthen the paper. \n\n\n\n[1] Huang et al., A Variational Perspective on Diffusion-Based Generative Models and Score Matching, (NeurIPS 2021)\n\n[2] Song et al., Maximum Likelihood Training of Score-Based Diffusion Models (NeurIPS 2021)" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Your SDE $d X_t = - \\frac{1}{2(1-t)} X_t d t + \\frac{1}{\\sqrt{1-t}} d B_t $ is a special case of Song's SDE in [1], which takes the form\n\n$$d X_t = -\\frac{1}{2} \\beta_t X_t dt + \\sqrt{\\beta_t} d B_t$$\n\nIt appears that there are countless choices of $\\beta_t$. Why do you claim $\\beta_t = \\frac{1}{1-t}$ is the continuous-time limit of the aforementioned forward process in section 2.1 and is preferable to Song's linear version $\\beta_t = \\beta_{\\min} + t (\\beta_{\\max} - \\beta_{\\min})$? \n\n[1] Score-Based Generative Modeling through Stochastic Differential Equations. ICLR'21." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "The analysis of the variational gap for the continuous-time approximation of discrete-time diffusion models, such as DDPM, has been missing. The authors conducted clear and solid derivations to show why the variational gap is negligible, which provides justification for the empirical practice of treating the ELBO as a faithful surrogate for the true objective." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Despite empirical advances, the theoretical foundation for why optimizing the evidence lower bound (ELBO) on the log-likelihood is effective for training diffusion generative models, such as DDPMs, remains largely unexplored. The authors proposed to address this question by establishing a density formula for a continuous-time diffusion process, which can be viewed as the continuous-time\nlimit of the forward process in an SGM. The formula shows that the variational gap is negligible." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Despite the soundness of the derivations, I found the goal of this paper not very interesting.\n\n2. The insights on GANs are not clear to me (I am a layperson in GANs)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed."
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Several existing works also explore the relationship between the density and the optimization objectives of diffusion models, e.g. [1] and [2]. What's the relation between the current results and those in existing works. \n\n [1] Kong et al. \"Information Theoretic Diffusion.\"\n\n [2] Song et al. \"Maximum likelihood training of score-based diffusion models.\" \n\n2. In general variational inference, for fixed observations, maximizing the ELBO is equivalent to minimizing the KL divergence. What's new in the current results compared to the general observation? \n\n3. Are there error bounds for the various approximations? Without such bounds, why do we expect the interpretation using approximations to be better and more useful than the interpretation using lower bounds? \n\n4. Existing theoretical results have provided error bounds for KL divergence. What's the connection between those results and the current results? What additional insights can the current results bring?\n\n5. What's the advantage of the SDE in (2.4) over the more commonly used O-U process?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper provides a formula relating the target density and the scores at different time steps. \n\n2. The paper shows that maximizing the ELBO in DDPM is approximately equivalent to minimizing the KL divergence of the target distribution and the learned distribution." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper derives a density formula for a continuous-time diffusion process, which can be viewed as the continuous-time limit of the forward process of an SGM. The formula relates the target density and the score functions at different time steps. The authors use the formula to show that maximizing the ELBO in DDPM is approximately equivalent to minimizing the KL divergence of the target distribution and the learned distribution. The authors also apply the approximation to explain the use of score-matching regularization in GAN training, ELBO in diffusion classifier, and diffusion loss is autoregressive models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The contribution is unclear. The discussion about some important existing results are missing. \n\n2. The applications of the density formula presented here involves approximations, but there is no characterization of the approximation errors. \n\n3. The presentation needs improvement." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "* In light of the comments about weaknesses above, can you clearly spell out what are the novel contributions of the submitted article? I am not against revisiting known results to set the stage for the main contributions, but I have the impression that most of the conclusions drawn in Sec. 4, which is where the authors use their revisited formulation of the log data density, have been known to the community, also from the theoretical point of view, and not only from an heuristic perspective. Can you also answer to the questions raised in the \"weaknesses\" section of this review?\n\n* Despite the intelligible intent of reuniting continuous-time and discrete-time models, I find the exposition of results in Sec. 2 and Sec. 3, according to slightly different formulations than those existing in the literature, is confusing. Is there a way to organize this work such that contributions are more clear, and the implications of the presented theory spelled out well?\n\n* Would you feel comfortable by stating that your exact formulation of the log density of the data distribution as a function of the drift and diffusion terms of the SDEs, or equivalently the log density of the data distribution as a function of the transition kernels and noise of the discrete-time diffusion, as a novel result that has not been discussed in the literature?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "* It is important to revisit known results that might have been obtained through carefully engineered heuristics, through the lenses of a sound theoretical formalism, such that the community can validate existing choices. The endeavor of this work is in line with this objective, which I think is valuable.\n* This work shows that the theory developed to derive an expression for the density of the data distribution can be applied to numerous modeling approaches to generative modeling.\n* The mathematical derivations in Appendix A (which are the most important to me), seem correct." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work is presented as a theoretical contribution to the field of diffusion models. \n\nThe overall structure of the paper mimics the intended objective of this work: to revisit both continuous-time and discrete-time diffusion models to arrive at the (exact and approximate) definition of a density expression for the log data density (in continuous and discrete time), that is used to: i) discuss the validity of an ELBO formulation for the optimization of the parameters of the denoising network of discrete diffusion models, ii) understand the optimization objective in generative adversarial networks, iii) provide a justification for classifier-based guidance in diffusion models, and iv) show that the diffusion loss used in autoregressive models corresponds to an approximate maximum likelihood solution." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* Sec. 
2: this section repurposes known results from the literature, including [1,2,3], in which it has been shown the equivalence between discrete-time and continuous-time variants of diffusion. Note also that [3], which is not cited by the authors, shows that \"*the log-likelihood of score-based models can be tractably computed through a connection to continuous normalizing flows, even though the log-likelihood is not directly optimized by the weighted combination of score matching losses. In particular, it is shown that under a specific weighting scheme the score-matching objective upper bounds the negative log-likelihood, thus enabling approximate maximum likelihood training of score-based models.*\"\n* In sec 2.1, DDPM are revisited, but mixed with score functions, yielding Eq. 2.3. Why and how does the score function appears in discrete-time diffusion?\n* In sec 2.2, I am curious to learn why Eq. 2.4 has been chosen to be so specific, instead of using a more general form with a functional drift term. Here you specify a linear drift whose coefficients explode, compared to the typical variance preserving formulation from [1], as time $t \\to 1$.\n\n[1] Song et al. “Score-Based Generative Modeling through Stochastic Differential Equations”, https://arxiv.org/abs/2011.13456\n\n[2] Ho et al. “Denoising Diffusion Probabilistic Models”, https://arxiv.org/abs/2006.11239\n\n[3] Song et al. “Maximum Likelihood Training of Score-Based Diffusion Models”, https://arxiv.org/abs/2101.09258\n\n* Sec. 3: This section displays some calculations that rely on the continuous-time formulation of diffusion processes. Sec. 3.1 begins by focusing on Eq. 2.4, which is the linear variance preserving SDE discussed above. Sec. 3.2 continues the derivations, to relate continuous-time and discrete-time known results, and Sec. 3.3 discusses known results on the equivalence to a probability flow ODE and more recent results on density estimation. What are the main take home messages here? What is the original contribution the authors would like to put forward in this section?\nTo the best of my understanding, the result in Eq. 3.1.a is an exact formulation for the log likelihood of the data distribution that did not require, as done in [1,3], probability flow ODE equivalence. I followed the proof in Appendix A, and to my eyes it seems correct.\nSec 3.2 should also deserve more insights provided by the authors, as it gives an approximate log density for the discrete case, bypassing the need to work directly in discrete-time. Can we quantify the discretization errors that are introduced by relying on Eq 3.1.c?\n\n* Sec. 4: This is an “application” of the exact log density expression for the data distribution form Sec. 3.\nSec. 4.1 aims at discussing the validity of the ELBO formulation as a good proxy for the log likelihood, to demonstrate that in DDPM optimizing the ELBO is a valid replacement for optimizing log likelihood. This can also be understood from [2] and [1] above, and, for continuous time, is readily discussed in [3], which also shows the similarity (modulo discretization errors and constants) between continuous-time and discrete time formulations. So, what do we learn from the derivations presented in this section that were not directly discussed in these earlier work?\nSec. 
4.2 begs for the same question, and should be reviewed in light of an overloaded notation: please check that $z$ is used both as a random variable sampled from a noise distribution, and as a normalizing factor.\nSec 4.3 revisits classifier guidance mechanisms for conditional generation using diffusion models, and offers a critic to some practical heuristics used in recent work, based on the density defined in this paper.\nSimilarly, Sec. 4.4 revisits autoregressive models in light of the proposed density definition, and suggest that the training objective used in the literature can be viewed as approximate maximum likelihood training." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024a,\ntitle={A Score-Based Density Formula, with Applications in Diffusion Generative Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4dAhjhm2Mm},\nnote={under review}\n}" }, "abstract": { "value": "Score-based generative models (SGMs) have revolutionized the field of generative modeling, achieving unprecedented success in generating realistic and diverse content. Despite empirical advances, the theoretical basis for why optimizing the evidence lower bound (ELBO) on the log-likelihood is effective for training diffusion generative models, such as DDPMs, remains largely unexplored. In this paper, we address this question by establishing a density formula for a continuous-time diffusion process, which can be viewed as the continuous-time limit of the forward process in an SGM. This formula reveals the connection between the target density and the score function associated with each step of the forward process. Building on this, we demonstrate that the minimizer of the optimization objective for training DDPMs nearly coincides with that of the true objective, providing a theoretical foundation for optimizing DDPMs using the ELBO. Furthermore, we offer new insights into the role of score-matching regularization in training GANs, the use of ELBO in diffusion classifiers, and the recently proposed diffusion loss." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "score-based density formula", "score-based generative model", "evidence lower bound", "denoising diffusion probabilistic model" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/3a962d5485bdd4e5687c010963acaba1c9d77efc.pdf" }, "presentation": null, "primary_area": { "value": "learning theory" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. 
To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "A Score-Based Density Formula, with Applications in Diffusion Generative Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
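Several questions in the record above (for instance, on how maximizing the ELBO relates to minimizing a KL divergence) invoke standard variational identities. They are reproduced here in generic notation for reference only; this is textbook material, not the reviewed paper's density formula or its notation.

```latex
% x: data, z: latents, q(z|x): a fixed inference/forward process
\log p_\theta(x)
  \;=\; \underbrace{\mathbb{E}_{q(z\mid x)}\!\left[\log \tfrac{p_\theta(x,z)}{q(z\mid x)}\right]}_{\mathrm{ELBO}(x;\,\theta)}
  \;+\; \mathrm{KL}\!\big(q(z\mid x)\,\|\,p_\theta(z\mid x)\big),
\qquad
\mathbb{E}_{x\sim p_{\mathrm{data}}}\!\big[-\log p_\theta(x)\big]
  \;=\; \mathrm{KL}\!\big(p_{\mathrm{data}}\,\|\,p_\theta\big) \;+\; H\!\big(p_{\mathrm{data}}\big).
```

Because the first KL term is non-negative, the ELBO lower-bounds the log-likelihood, and by the second identity maximizing the average log-likelihood over the data is equivalent, up to the constant data entropy, to minimizing the KL divergence between data and model. As the reviews describe it, the paper's claim is that the first gap is negligible for the DDPM forward process, which is what the reviewers ask to see positioned against prior maximum-likelihood results.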
4dHyH42ha7
4DEditPro: Progressively Editing 4D Scenes from Monocular Videos with Text Prompts
main
Withdraw
4D scene editing;Diffusion model;4D Gaussian representation
applications to computer vision, audio, language, and other modalities
Jingyi Pan;Qiong Luo
~Jingyi_Pan1;~Qiong_Luo1
3;3;3;5
4;5;3;5
3;1;1;2
2;2;2;2
3;3;2;2
3.5
4.25
1.75
2
2.5
0.522233
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": { "value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors." } }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. What factors contribute to the suboptimal performance of Gaussian Editor results? Given that scenes in Tanks and Temples and SemanticKITTI datasets are static (lacking moving objects), would it not be more appropriate to compare with the 3D version of Gaussian Editor? Furthermore, when applied to static scenes, do the results of the 3D and 4D versions of Gaussian Editor differ, or are they effectively the same?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "1. First approach for 4D scene editing from casual monocular videos, eliminating the need for camera pose input.\n2. Introduced Temporally Propagated Editing (TPE) and Spatially Propagated Editing (SPE) modules to improve temporal and spatial consistency.\n3. Quantitative evaluations show better performance over baselines, indicating the proposed method’s effectiveness across multiple metrics." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces a framework for 4D scene editing from monocular videos guided by text prompts. The proposed techniques, Temporally Propagated Editing (TPE) and Spatially Propagated Editing (SPE), ensure temporal and spatial consistency in the editing process. By introducing progressive dynamic representation through 4DGS, the framework can model scene attributes without requiring camera pose as an input." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Temporal consistency is not maintained. In the supplementary video, noticeable flickering occurs in several segments, such as the sailing boat (00:26–00:28), Minecraft scene (00:45–00:47), horse editing (00:48–00:51), and statue editing (00:52–00:56). \n2. 
The synthesized novel views show minimal differentiation from the original video, as seen in segments 00:22–00:23 and 1:08–1:12.\n3. Furthermore, the supplementary video primarily demonstrates static view synthesis, despite the method being proposed for 4D editing.\n4. The editing results showcased in the supplementary materials are mostly focused on color, style, and texture adjustments, with minimal instances of object shape editing. This suggests the method’s contributions in editing might be overstated.\n5. In terms of comparisons, the paper primarily contrasts its approach with static 3D scene editing methods, even though it claims to support 4D editing. Given that the showcased editing focuses on color, style, and texture modifications, a more fitting baseline would involve applying a video style transfer technique to the input video, followed by reconstructing the 4D scene using methods designed for monocular videos." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Is there a dynamic demo available to assess the quality of the scene dynamics? I'd be interested in increasing my rating after seeing more extensive examples." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The task itself is valuable and timely, addressing the growing need for efficient 4D scene editing in casual videos.\n2. The paper is well-structured and clearly written, making it easy for readers to understand the methodology and approach.\n3. The experiments are thorough and appear to be well-designed, with enough detail provided to ensure reproducibility by others in the field." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents 4DEditPro, a new framework for editing 4D scenes in casual monocular videos using text prompts. Unlike conventional methods that require multi-view images or known camera poses, 4DEditPro works with single-view videos, allowing easy and consistent scene edits without extra setup. It achieves this by combining two modules: Temporally Propagated Editing (TPE) for smooth, time-consistent edits across frames. Spatially Propagated Editing (SPE) for spatial consistency by generating nearby “virtual views” to fill in missing details. Using a pose-free 4D Gaussian Splatting (4DGS) technique, 4DEditPro reconstructs scenes without needing camera poses, enabling flexible, high-quality editing. The approach is effective for both targeted edits and broader style changes, making text-driven video editing practical and seamless." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The process for selecting the reference token lacks detail, and it’s unclear how this selection impacts the final results.\n2. 
The pipeline doesn’t present any particularly innovative insights.\n3. mThe editing results seem somewhat imprecise; for example, in Figure 4, the \"silver\" and \"night\" edits appear unnatural." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Following the weaknesses, please consider:\n- Compare with video editing methods in spatial quality.\n- Compare with baseline Instruct 4D-to-4D with its already released code.\n- Compare with baseline Instruct 4D-to-4D by using the datasets it is using, i.e., DyCheck, DyNeRF, and Google Immersive, with the corresponding tasks.\n- Evaluate the method on more multi-view scene datasets." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "- The paper is overall written clearly, with clear formulas and descriptions.\n- In various editing tasks, the proposed method outperforms the baselines both quantitatively and qualitatively.\n- Ablation studies are provided to show the effectiveness of components." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies instruction-guided 4D scene editing. It proposes 4DEditPro, a method that uses two modules, TPE and SPE, to ensure the temporal and spatial consistency, and uses a pose-free 4DGS to reconstruct 4D scene from each viewpoint's videos. The proposed method can perform well in different 4D editing tasks in the evaluation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The reasonability of task \"4D scene editing using only casual videos\" is questionable. \n - The 4D scene editing's input is a 4D _scene_, which is like a 3D model in Blender but is dynamic, that should be able to be put in any coordinates and render any videos accordingly. \n - The conversion between causal videos and 4D scenes is closer to 4D scene _reconstruction_ than editing. This should not be regarded as challenging in 4D editing.\n - Therefore, the challenges that this paper aims to solve are far-fetched - they are brought from another task (i.e., 4D scene reconstruction) to obtain the input of the current task (i.e. 4D scene editing), but not a part of the current task with a valid input.\n - In fact, lots of the contents of the paper just aim to do reconstruction, e.g., in Sec 3.2. This part seems quite orthogonal to the editing part.\n- The model seems only working on monocular video, i.e. there is only one camera in the scene.\n - This significantly reduces the challenge of spatial 3D consistency. 
This might be the reason why a depth estimator (L335) can easily reconstruct the 3D structure.\n - When there is only one monocular video, the editing task then degrades to \"video editing with 3D consistency requirements.\" Therefore, video editing methods should be compared. However, they are not.\n - According to the demo video, all the scenes are monocular. This necessitates the comparison against video editing models.\n- The only baseline \"Instruct 4D-to-4D\" is not compared with. This is the only baseline that works in this task. It is crucial to compare with it.\n - The authors claimed that \"Instruct 4D-to-4D\"'s code is not publicly available. However, according to the Github repo of \"Instruct 4D-to-4D\", the code was released on 8/29, which is one month before the deadline of ICLR. This method should have, therefore, been compared against the paper.\n - Even if the code is not released, all the dataset used by Instruct 4D-to-4D are all public. Therefore, a comparison against Instruct 4D-to-4D should still have been achieved with those datasets and the same editing tasks as Instruct 4D-to-4D.\n- In Tab.1, only the \"Average\" row is marked bold on the best numbers. Other rows should also be marked (and it seems that \"Ours\" are always the best, so this should improve the soundness).\n- The model needs the user to provide descriptions of both original and edited scenes, which requires more human work. The baselines IN2N, GSEditor-4D, and Instruct 4D-to-4D only require editing instruction.\n- The only 4D scenes used for editing are just three monocular dynamic scenes. \n - As a comparison, the baseline Instruct 4D-to-4D compares with at least 3 monocular dynamic scenes and 5 multi-view dynamic scenes, covering DyCheck, DyNeRF, Google Immersive, etc, and as long as 300 frames. \n - Therefore, this paper's comparison experiments are significantly weaker and more incomplete than the baseline." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. The paper claims to achieve 4D scene editing from monocular video, but isn’t this simply a combination of monocular video editing and 4D reconstruction from monocular video? I’m uncertain why this qualifies as 4D editing.\n\n2. The explanation for setting the camera pose using Slerp is unclear. A more detailed clarification on this aspect would be helpful. Additionally, it would strengthen the paper to demonstrate how robustly the 4D reconstruction handles variations in camera poses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper is in general well organized and easy to follow.\n2. The paper presents a pipeline for performing editing 4D from monocular video using Gaussian Splatting." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper propose a framework for 4D scene editing in casual monocular video using text prompt. Unlike traditional methods that require multi-view images or camera poses, 4DEditPro works without external tools by using two key modules: TPE for maintaining coherence across frames and SPE for enhancing spatial consistency. A pose-free 4D Gaussian Splatting(4DGS) approach further enables scene reconstruction and editing without pre-calculated poses. Experiments demonstrate the result of this 4DEditPro through both qualitative and quantitative results, as well as user evaluations." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The key contribution of the proposed method (TPE, SPE) appears to lie in the integration of several minor techniques, such as feature extraction and injection into the video diffusion model. The novelty seems lacking, without too much novel insight. \n\n2. The advantages of TPE are not clear. Comparative experiments with previous off-the-shelf video editing models(e.g., TokenFlow[1], Fatezero[2], Flatten[3]) would be essential to demonstrate TPE’s advantages. However, this paper only includes comparisons with 3D editing models like GSEditor-4D.\n\n3. The pose changes in the novel view synthesis from the monocular video in the demo video appear too subtle. These result seems closer to video editing and falls somewhat short of being considered 4D editing. A detailed disclosure of how the authors set the poses in this experiment with monocular video would enhance the understanding of this paper’s strengths.\n\n4. Additionally, while the paper claims to achieve 4D editing, no experiments exist on 4D datasets. Using representative 4D datasets like DyNeRF[4] and HyperNeRF[5], as well as comparisons with other 4D editing models (e.g.,Instruct 4D-to-4D[6]), would make the paper’s argument more persuasive.\n\n[1] Geyer, Michal, et al. \"Tokenflow: Consistent diffusion features for consistent video editing.\" arXiv preprint arXiv:2307.10373 (2023). \n[2] Qi, Chenyang, et al. \"Fatezero: Fusing attentions for zero-shot text-based video editing.\" Proceedings of the IEEE/CVF International Conference on Computer Vision. 2023. \n[3] Cong, Yuren, et al. \"Flatten: optical flow-guided attention for consistent text-to-video editing.\" arXiv preprint arXiv:2310.05922 (2023). \n[4] Li, Tianye, et al. \"Neural 3d video synthesis from multi-view video.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2022. \n[5] Park, Keunhong, et al. \"Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields.\" arXiv preprint arXiv:2106.13228 (2021). \n[6] Mou, Linzhan, et al. \"Instruct 4D-to-4D: Editing 4D Scenes as Pseudo-3D Scenes Using 2D Diffusion.\" Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@misc{\npan2024deditpro,\ntitle={4{DE}ditPro: Progressively Editing 4D Scenes from Monocular Videos with Text Prompts},\nauthor={Jingyi Pan and Qiong Luo},\nyear={2024},\nurl={https://openreview.net/forum?id=4dHyH42ha7}\n}" }, "abstract": { "value": "Editing 4D scenes using text prompts is a novel task made possible by advances in text-to-image diffusion models and differentiable scene representations. 
However, conventional approaches typically use multi-view images or videos with camera poses as input, which causes inconsistencies when editing monocular videos due to the reliance of these tools on iteratively per-image editing and the absence of multi-view supervision.\nFurthermore, these techniques usually require external Structure-from-Motion (SfM) libraries for camera pose estimation, which can be impractical for casual monocular videos. \nTo tackle these hurdles, we present 4DEditPro, a novel framework that enables consistent 4D scene editing on casual monocular videos with text prompts. \nIn our 4DEditPro, the Temporally Propagated Editing (TPE) module guides the diffusion model to ensure temporal coherence across all input frames in scene editing.\nFurthermore, the Spatially Propagated Editing (SPE) module in 4DEditPro introduces auxiliary novel views near the camera trajectory to enhance the spatial consistency of edited scenes. \n4DEditPro employs a pose-free 4D Gaussian Splatting (4DGS) approach for reconstructing dynamic scenes from monocular videos, which progressively recovers relative camera poses, reconstructs the scene, and facilitates scene editing.\nWe have conducted extensive experiments to demonstrate the effectiveness of our approach, including both quantitative measures and user studies." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": { "value": [ "~Jingyi_Pan1", "~Qiong_Luo1" ] }, "authors": { "value": [ "Jingyi Pan", "Qiong Luo" ] }, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "4D scene editing", "Diffusion model", "4D Gaussian representation" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": { "value": "pan|4deditpro_progressively_editing_4d_scenes_from_monocular_videos_with_text_prompts" }, "pdf": { "value": "/pdf/09826e79f04c6752fb35c600ea3562e77beee8a8.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": { "value": "/attachment/3ac8afcf89b69e45f5249f87295741236aa94d34.zip" }, "title": { "value": "4DEditPro: Progressively Editing 4D Scenes from Monocular Videos with Text Prompts" }, "venue": { "value": "ICLR 2025 Conference Withdrawn Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Withdrawn_Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4dhTYe5pjD
Low Variance: A Bottleneck in Diffusion-Based Graph Imputation
main
Active
diffusion-based imputation;missing features;graph neural networks
learning on graphs and other geometries & topologies
3;3;5;6
4;3;4;5
2;2;3;3
1;2;2;3
3;2;3;3
4.25
4
2.5
2
2.75
0.816497
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. In analysis, why does the authors say that the complexity of Dijkstra algorithm is O(n^2)? In fact, its complexity is O(nlogn) with a heap. Please clarify if a specific implementation of Dijkstra's algorithm is used that results in O(n^2) complexity, or if this is an error that needs correction?\n2. How does the performance of FISF compare to other imputation methods in terms of scalability and computational efficiency?\n3. Does the experimental dataset take the largest connected block? How does the method perform on non-fully connected datasets?\n4. How does the performance of FISF on the heterophilic graphs?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper is well-written and well-structured, with clear explanations and figures that facilitate understanding.\n2. The paper presents a novel approach to address the low-variance problem in diffusion-based graph imputation, which has not been explored extensively in previous work.\n3. The paper provides strong empirical evidence and theoretical proofs to support its claims, making the contribution robust and reliable.\n4. The proposed method, FISF, demonstrates superior performance in various graph learning tasks, making it a valuable tool for researchers and practitioners working with graphs containing missing features." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper investigates the limitations of diffusion-based graph imputation methods, particularly focusing on the issue of “low variance” channels. These channels contain imputed features with nearly identical values across nodes, leading to limited information for graph learning tasks. To address this problem, the authors propose a novel imputation method called FISF (Feature Imputation with Synthetic Features). FISF injects synthetic features into low-variance channels, increasing their variance and enhancing the distinctiveness of node representations. The paper presents empirical evidence and theoretical proofs demonstrating the effectiveness of FISF in various graph learning tasks, including semi-supervised node classification and link prediction, with varying missing rates." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The complexity of the proposed method is confusing. For example, why the complexity contains O(|$\\mathcal{E}$|) needs more clarification.\n2. While the paper compares FISF with several existing methods, a more in-depth analysis and comparison with alternative methods, particularly in terms of scalability and computational efficiency, would strengthen the contribution. For example, authors can give running time comparisons on large graphs such as OGBN-Arxiv.\n3. 
The performance discussion on heterophilic graphs is missing, while the competitor FP gives the analysis that diffusion based methods are not suitable for heterophilic graphs. The authors should clarify whether such a limitation still exists in FISF." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1) In my view, the notion of low-variance channels need to be discussed in more depth. In one sense, low variance is not necessarily an issue, as it depends on the nature of the task - for node classification, if there is high homophily in labels, low variance is not necessarily bad. In other words, how low is “low variance” should perhaps be explained more clearly.\n\n2) It looks to me the pre-diffusion step is critical in determining which channels have low variance. Is it always best to allow the diffusion to (nearly) converge or we control this in a more adaptive fashion? \n\n3) Why choosing only one node to inject synthetic feature? Can it be selected in a more informative way than randomly? Also, what’s the impact of r (number of channels to inject synthetic features)?\n\n4) Can the proposed method be tested on datasets with low label homophily? In my view this is when the proposed method might show clearest advantages over baselines such as FP." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1) The paper investigates an under-explored but practically important setting in graph machine learning.\n\n2) The technical framework is intuitive and simple to implement. It is also presented clearly with the necessarily detail. \n\n3) Experimental results and analyses in Appendix are comprehensive in providing evidence the proposed method works well in practice." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a novel methodology for solving graph machine learning tasks with missing input node features. The main idea consists of three steps: 1) pre-diffusion; 2) identify low-variance feature channel for synthetic feature injection; 3) post injection diffusion. Experimental results demonstrate empirically the effectiveness of the proposed method." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1) The proposed method, while intuitive, lacks sufficient theoretical justification. It is not entirely clear why injecting random features and re-run diffusion would help, apart from mechanistically forcing features not to converge to uniform values. It would be good if the authors can provide more theoretical investigation of the proposed method, perhaps from the viewpoint of expressivity or spectral analysis. 
At the moment, the theoretical contribution appears limited in my view.\n\n2) Some modelling choices seem ad-hoc and need more justification and validation (see Questions below).\n\n3) Empirical experiments seem to only focus on datasets where reasonably high homophily in node labels. This somewhat limits the understanding of effectiveness of the proposed method. It would be good to see the method tested against baselines in low homophily settings." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "same as in weakness" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The proposed method demonstrates promising results across multiple datasets, including a real-world application, highlighting its practical effectiveness and versatility.\n2. The method is supported by theoretical analysis, which strengthens the validity of the approach.\n3. The application of diffusion-based techniques on graphs is intriguing" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, they address learning tasks on graphs with missing features. Traditional graph diffusion-based imputation methods often yield low-variance channels with nearly identical values, which contribute minimally to graph learning performance. To counteract this, they introduce synthetic features that reduce low-variance production in diffusion-based imputation, thereby enhancing feature diversity. They provide both empirical and theoretical validation of the low-variance issue." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The motivation for this study requires further clarification, particularly in establishing a clear connection between missing features and their impact on graph learning performance. The logical link between the presence of missing features and the degradation in model performance is not thoroughly articulated.\n\n2. The problem setting requires further clarification. The term “missing features” is too broad, as it could refer to missing graph structure or node features, each posing distinct challenges. It’s important to specify the type of missing data being addressed and to clearly illustrate the characteristics and implications of different types of feature missingness. A more precise explanation would help readers understand the unique challenges of the specific missing-feature scenario considered in this paper and how it influences the choice of methods.\n\n3.The scalability of the proposed method is not thoroughly discussed, particularly concerning large-scale graphs or graphs with extremely high missing feature rates." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "The authors argued in D3 that their method is better than positional encoding: in table 20 that under 99.5% missing rate, FIST outperforms positional encoding (node2vec). Can we get some sensitivity analysis on the other missing rate and different positional encoding techniques? Also, some of the experiment details are lacking." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The authors identified a low variance issue exacerbated during diffusion to fill in the missing values. Clearly such channels do not provide much information in many downstream tasks. They proposed to inject random noise and re-diffuse using a very similary method to PCFI, with a hyperparameter to allow the synthetic data to have wider range influence. The paper is easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors found the diffusion methods on graphs to impute missing data reinforce a low variance problem in feature channels, hindering the model performance. They proposed to inject random noise on such channels and re-diffuse on synthetic labels." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "My concerns are mostly on the novelty of the paper. \n\nThe paper method description, analysis, and experimentation follow PCFI. The authors pointed out that low variance channels are causing issues in downstream task learning. Injecting random noise and then letting the feature diffuse from the “noisy nodes” does increase the variance, help distinguish the nodes and allow some structural information encoded in the process. But the process does not seem to be much different from PCFI, the methods described in this feel more like an implementation variation/details in PCFI. \n\nAdditionally, there has been published work to address such issues, as the authors reviewed in 2.2, positional encoding etc." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "For graphs with missing features, we identify a problem in existing diffusion methods and propose a novel scheme that addresses this issue." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024low,\ntitle={Low Variance: A Bottleneck in Diffusion-Based Graph Imputation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4dhTYe5pjD},\nnote={under review}\n}" }, "abstract": { "value": "In this paper, we tackle learning tasks on graphs with missing features, improving the applicability of graph neural networks to real-world graph-structured data. 
Existing imputation methods based upon graph diffusion produce channels that have nearly identical values within each channel, and these low-variance channels contribute very little to performance in graph learning tasks. To prevent diffusion-based imputation from producing low-variance channels, we introduce synthetic features that address the cause of the production, thereby increasing variance in low-variance channels. Since the synthetic features prevent diffusion-based imputation models from generating meaningless feature values shared across all nodes, our synthetic feature propagation design prevents significant performance degradation, even under extreme missing rates. Extensive experiments demonstrate the effectiveness of our scheme across various graph learning tasks with missing features, ranging from low to extremely high missing rates. Moreover, we provide empirical evidence and theoretical proof that validate the low-variance problem." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "diffusion-based imputation", "missing features", "graph neural networks" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/74d21a220b0a9af44e47a58e56ac5f9527cca64e.pdf" }, "presentation": null, "primary_area": { "value": "learning on graphs and other geometries & topologies" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Low Variance: A Bottleneck in Diffusion-Based Graph Imputation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4dtwyV7XyW
Toward Principled Transformers for Knowledge Tracing
main
Active
educational data mining;knowledge tracing;transformer
other topics in machine learning (i.e., none of the above)
3;3;3;5
4;4;2;4
2;2;2;2
1;1;2;2
3;2;1;3
3.5
3.5
2
1.5
2.25
0.333333
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Refer to the weakness above." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1.KTST demonstrates promising performance.\n2.The authors conducted comprehensive comparative experiments and provided an in-depth analysis of the results.\n3.The model diagram is clear and straightforward, enhancing readability and understanding." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents knowledge tracing set transformers (KTSTs), a class of streamlined models designed specifically for knowledge tracing prediction tasks. To account for the unique characteristics of these tasks, this work introduces a simplified, learnable variant of the attention matrix and an interaction representation that does not rely on domain-specific knowledge. In experiments on standardized benchmark datasets, KTST achieves new state-of-the-art performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The primary motivation is not clearly presented in the text, and the structure of sections reveals some issues in logical flow, with parts containing redundant explanations.\n2. The expression “This model class … flawed evaluation” in the abstract is convoluted and unclear, making it difficult for readers to grasp the core motivation of the study.\n3. The Introduction lacks an illustrative figure that directly presents the research problem, and the content in this section appears insufficient.\n4. While I understand that the authors place part of the Related Work in Section 4 to emphasize their contributions, the extensive descriptions might raise questions regarding the sufficiency of the work’s original contributions.\n5. Section 4.2 primarily introduces the learnable modification of attention matrices. Could you please explain how it differs from ALiBi [1]?\n6. Section 4.3 mainly addresses the handling of multi-concept and identifies it as one of the paper’s research questions. As far as I know, related works [2,3,4] have largely resolved this issue, so what are the significant advantages of this approach?\n7. The experiments in Section 5.3 introduce a randomly simulated dataset for multi-concept knowledge, aimed at comparing three embedding methods. In addition to random simulations, including a specially designed simulation method could make the comparisons more compelling.\n\n[1] Press O, Smith N A, Lewis M. Train short, test long: Attention with linear biases enables input length extrapolation[J]. arXiv preprint arXiv:2108.12409, 2021.\n\n[2] Long T, Liu Y, Shen J, et al. 
Tracing knowledge state with individual cognition and acquisition estimation[C]//Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval. 2021: 173-182.\n\n[3] Zhang M, Zhu X, Zhang C, et al. Multi-factors aware dual-attentional knowledge tracing[C]//Proceedings of the 30th ACM International Conference on Information & Knowledge Management. 2021: 2588-2597.\n\n[4] Cui J, Chen Z, Zhou A, et al. Fine-grained interaction modeling with multi-relational transformer for knowledge tracing[J]. ACM Transactions on Information Systems, 2023, 41(4): 1-26." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "**(a)** Regarding the expanded representation works, I wanted to ask the authors how it works for a query with more than one knowledge component. Specifically, let’s say for a query X_5, after passing all the interactions Y_1 to Y_4, how do we query X_5 if it has more than one knowledge component? If for example the query has 5 knowledge components, do we have to give 4 queries (with each knowledge component in a single query) and simply ignore the output until the last (5th) query is given?\n\n**(b)** Regarding the results, Lines 515-520, state that MHSA works better for larger data and larger Component-to-question ratio. But at the same time MHSA (compared to the mean method), introduces learnable parameters that require data. So I’m not 100% sure if the issue is really the ratio, or just the low amount of data in certain datasets? I would appreciate it if this can be disentangled (with an experiment)." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "**(a)** The related work discussion is very thorough and, even though I'm not familiar with the literature, the authors do a great job in covering many relevant works on every single design choice. The presentation of such related work, however, needs some rework (see weakness **(a)** below)\n\n**(b)** The experiments seem to be very thorough (it is mentioned that there are multiple initialization and hyper-parameters tested for every baseline). The ablation is also well-done in terms of different design choices (cf. Table 3. with inputting different queries to the attention blocks or Table 2&3 (bottom) with different component aggregation strategies).\n\n**(c)** The numbers overall also seem promising. Although I'm confused whether the (mean) and (unique) lines from Tables 2 and 3 can be considered as their contribution? (see Weakness **(d)** below)." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper tackles the task of knowledge tracing, which can sort of be summarized to predicting a binary correctness value for a response R to a question Q with certain knowledge components C (i.e concepts), conditioned on the previous questions (and their components and responses). The authors use the transformer architecture for this (which seem to also be explored in prior work) and propose a specific Multi-head Self-Attention block that is suited to the task. Specifically, it is argued that the the Knowledge Components should be parsed in a permutation-agnostic way. There are also other approaches such as simply taking the mean of all the components that are studied.\n\nThe experiments study quantitatively study such design choices and also explore other aspects of the data, such as the average number of components per question and how it relates to the used method." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "**(a)** I really appreciate the authors being thorough when discussing the related work, but I think there is a lot of room for improving the discussion. Specifically, the current version can sometimes be very confusing to someone who is not entirely familiar with the literature. \n\nFor example, take a look at Lines 108-136 (‘Limitations of related work’). I don’t understand why this section has to come after the Problem Setting (Section 3) and why it is not already covered in the previous Related Work discussion (Section 2). I understand that the authors needed to rely on the defined notation (in Line 121), but most of the text could be merged in the previous discussion on the Related Work. You could also potentially move the problem setting to precede Section 2.\n\nThe related work discussion is again further spread to other sections, see the Lines 198-207 and Lines 252-259. This really makes it hard for me to assess where this paper stands in comparison to related work, as suddenly later it is revealed that there are other works who have also explored similar directions, which didn’t seem to be the case in Section 2. I urge the authors to significantly rework the related work discussion. For example, you could include a paragraph on all the transformer-based Knowledge Tracing methods and maybe also a paragraph on the works who explored different attention mechanisms. These should all be in the Section 2 (Related Work) and not interleaved in the method discussion!\n\nOne other example of this issue would be Line 286-288: “We propose three interaction embeddings”. And later in Line 307-208 two of which (`mean` and `unique`) turn out to be used in prior work and only the third one is claimed novel. Again, as a reader, it is very confusing where this paper stands.\n\n**(b)** The overall related work discussion can also sometimes be too abstract. For example, Lines 127-128 state “Without proper masking …, this introduces label leakage”. This sentence for example is not clear what is meant by neither “proper masking” nor “label leakage”. \n\n\n**(c)** There are certain instances where the authors criticize prior work on weak grounds. For example, in Lines 200-202 it is stated that prior RNN-based work has certain inductive biases by having a hidden state associated with a student's knowledge, and later it is stated that the paper’s [transformer-based] approach is conceptually simpler. I cannot 100% agree with such a comparison. 
I don’t agree why having an inductive bias, as long as it’s general-enough, would be a downside and certainly don’t agree with transformers being “conceptually simpler” than an RNN. \n\n**(d)** Regarding the results, it seems that the most-relevant line in the tables is `KTST (MHSA)` as it is stated in line 309-310 that the `(mean)` and `(unique)` methods are from prior work. If that is the case, then the tables actually don't seem that promising since it's always under-performing prior work? I would like to ask the authors to clarify this.\n\n\n----- Minor Issues -----\n\n**(e)** The very first paragraph in the introduction needs 1-2 more sentences to further clarify what the setting is. It should clarify that the context is student-computer interaction and this is supposed to be used for digital education.\n\n**(f)** Line 122-123, at the end, it is stated: “One consequence is an increase …”. But later in the text there is no second consequence. \n\n**(g)** Currently Figure 2 is not very optimal, as it is basically the vanilla transformer architecture, except for X, Y, and Rs as inputs and outputs. I think the figure should be with further detail, demonstrating all the token types (e.g knowledge components and questions). A significant part of your method is also the aggregation, which again the figure is not explaining. The “causal masking” should ideally also be demonstrated in the figure.\n\n**(h)** I think the Figure 1 is really not that informative. It's just with random numbers and shapes and it really doesn't describe the problem. I think something like the figures in this talk (https://youtu.be/8ITtYnhslvE?si=ExSW6WGShqNTTTiu&t=106) would be more informative to someone not familiar with this topic." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please see weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper is clearly presented, effectively clarifying both the motivation and model architecture. The Transformer-based model is also straightforward to understand and implement. Additionally, the evaluations are quite comprehensive, comparing performance across 22 benchmark models, which clearly demonstrates the advantages of KTST in predicting student performance." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes Knowledge Tracing Set Transformers (KTSTs) for predicting student performance. Unlike domain-specific models, KTSTs use Transformer as a backbone and a learnable attention mechanism to handle student interaction data. KTSTs also learn set representations for knowledge components. The model outperforms or matches state-of-the-art results on multiple educational benchmarks." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- This work contrasts itself with models incorporating domain-specific knowledge (i.e., models with more interpretable components). However, the literature review overlooks a lot of newly published interpretable models. The focus of the review should shift towards domain-inspired and other Transformer-based models rather than general deep learning models. The authors should expand the review to include recent interpretable models and provide a comparative analysis.\n\n- In Sections 2 and 4, the repeated claim that “In contrast to KTSTs, most related work includes domain-inspired components that increase model complexity” lacks a clear comparison of these components and a quantification of the added complexity. Thus, I have the following questions and confusions: \n\n 1. I am unclear on the criticism of existing domain-inspired models\n - The inductive biases in architectures discussed in the paper, such as memory-augmented neural networks, question difficulty models, and graph neural networks for knowledge structures, are well-motivated within the educational domain. They reflect human learning processes, are generally beneficial in educational contexts and are not so specific as to be limited to particular subjects or cultures. From my perspective, the contribution of this work—specifically, the multi-head attention mechanism with learnable exponential decay on attention weights—is a re-formulation of embedding memory priors into the model. Similarly, the permutation invariance of concept representations functions as another form of regularization on the concept graph structure.\n\n - Regarding “interaction representations proposed in related work are often domain inspired and unnecessarily complex. “ Could the authors provide concrete examples on the domain-inspired and unnecessarily complex embeddings in existing works?\n\n 2. For the compared benchmarks, could the authors compare the complexity of these models quantitatively, including the training and evaluation time and the amount of model parameters? \n\n 3. Domain-inspired models are generally motivated by their effectiveness with small datasets and their interpretability, rather than purely optimizing prediction performance. To evaluate this, I suggest two additional experiments:\n\n - Could the authors conduct experiments on smaller datasets? Currently, models are trained on sequences of up to 200 consecutive interactions, which is extensive for educational data. Reducing the sequence length and the number of students would provide insight into model performance on limited data.\n\n - Could the authors analyze the embeddings learned by the model, such as the representations for knowledge components and question embeddings? This would provide interpretability insights into the regularization of permutation invariance. \n\n- I do appreciate the insight that concept representations should be permutation invariant. Could the authors include an ablation study to examine the impact of this design choice? Specifically: 1) test the model without enforcing permutation invariance on concept representations; 2) remove the knowledge component embeddings altogether and only keep the question embeddings. \n\n- I find it challenging to pinpoint the technical contributions of this work. The learnable attention weights component (Section 4.2) seems primarily to add flexibility to existing domain-inspired representations. 
Additionally, although three choices for set representations are explored, the novel approach (MHSA) offers only marginal advantages over mean embedding when training sequences exceed 4,000 interactions and each question includes 6 knowledge components in synthetic data. This seems not that applicable to real-world datasets as shown in the experiments on KT data." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- What are the differences between the improved ALiBi in this paper and the method in [1]?\n\n- What is the motivation for using ALiBi in knowledge tracing models?\n\n- What are the main contributions this paper?\n\n- Why is simulated data used instead of real data in Section 5.3?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- This paper focuses on knowledge tracing, which is an important and interesting topic in the educational community.\n\n- The writing is clear and the structure easy to follow.\n\n- The authors compare a substantial number of baselines, which enhances the credibility of the experimental results." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces Knowledge Tracing Set Transformers (KTSTs), a simpler, principled model class for knowledge tracing tasks that avoids complex, domain-specific designs by employing set representations of student interactions and a learnable attention modification for positional information." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The novelty of this paper may not fully meet the conference’s expectations. It appears to incorporate established methods to enhance model performance without sufficiently clarifying the research problem and motivation. The paper claims to improve ALiBi by making its matrix parameter learnable to provide positional information. However, a similar approach for learnable ALiBi matrix parameter was already proposed in [1]. How does this paper’s method differ from that approach?\n\n- Although the authors propose ALiBi with learnable parameters to supply position information to attention and introduce aggregation functions, the ablation study focuses solely on ALiBi, which appears insufficiently comprehensive. It remains unclear to what extent each component affects model performance.\n\n- Figure 3 shows little differences among various aggregation functions. It's unclear why simulated data is used instead of real data.\n\n- The paper claims the proposed method is “simpler than previous state-of-the-art approaches,” yet does not provide relevant experiments, such as an analysis of parameter count or computational cost.\n\n- Although significance testing is conducted, the performance improvement observed is modest. 
From a practical standpoint, it remains uncertain whether this advancement is substantial enough to significantly impact the field of knowledge tracing.\n\n- Figure 2 lacks clarity and omits essential annotations. For instance, what are the specific inputs for Q, K, and V? What does the pink box in the lower left corner represent?\n\n- There is no analysis of the learnable attention matrices to investigate what exactly influences model performance. I recommend adding experiments to enhance understanding.\n\n\n--- \n[1] Chi, Ta-Chung, et al. \"Kerple: Kernelized relative positional embedding for length extrapolation.\" NeurIPS, 2022." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose knowledge tracing set transformers, a straightforward model class for knowledge tracing that is conceptually simpler than previous state-of-the-art approaches while outperforming them on standardized benchmark datasets." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024toward,\ntitle={Toward Principled Transformers for Knowledge Tracing},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4dtwyV7XyW},\nnote={under review}\n}" }, "abstract": { "value": "Knowledge tracing aims to reason about changes in students' knowledge and to predict students' performance in educational learning settings. We propose knowledge tracing set transformers (KTSTs), a straightforward model class for knowledge tracing prediction tasks. This model class is conceptually simpler than previous state-of-the-art approaches, which are overly complex due to domain-inspired components, and which are in part based on suboptimal design choices and flawed evaluation. In contrast, for KTSTs we propose principled set representations of student interactions and a simplified variant of learnable modification of attention matrices for positional information in a student's learning history. While being largely domain-agnostic, the proposed model class thus accounts for characteristic traits of knowledge tracing tasks. In extensive empirical experiments on standardized benchmark datasets, KTSTs establish new state-of-the-art performance." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "educational data mining", "knowledge tracing", "transformer" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/23b74800ce9de99d3b7ec062c0974fddd362e5e6.pdf" }, "presentation": null, "primary_area": { "value": "other topics in machine learning (i.e., none of the above)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. 
If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/abfa67cad32d63b32855c867a170330f383653d6.zip" }, "title": { "value": "Toward Principled Transformers for Knowledge Tracing" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4es2oO9tw1
Compute-Constrained Data Selection
main
Active
Data Selection;Compute-constrained;Scaling Laws.
foundation or frontier models, including LLMs
3;5;6;8
3;3;3;4
1;3;2;4
1;3;1;4
2;3;3;4
5.5
3.25
2.5
2.25
3
0.800641
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "No ethics concerns." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "The value of the strategies that depend on similarity of samples to validation samples worry me in that they seem very dependent on the size of the validation set, and that if the validation set is too small one might overfit. But perhaps it doesn't matter too much since you are always selecting some large number of training samples anyways, and so even if the validation set is as small as Paris (to use a 2D example), you still correctly pick the subset of training samples in Europe and dump the ones in the Americas, and that wouldn't have changed much if the validation set was all of France instead of just Paris. Be great to see some discussion & experiments about this, even if they are tiny, in this paper. \n\nSee also the weaknesses section." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "I found this is an important experimental contribution for practitioners and academics alike, and is likely to be heavily cited in the future. While there will inevitably be some discussion of whether they compared to all the right and best methods, I think that's in the details: they compared good and sufficiently recent example methods from high level strategies and showed significant enough differences that seem endemic to these different strategies." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Surveys and experimentally compares different data selection methods for LLM fine-tuning, and reasonably and quantitatively concludes that only rather cheap methods that choose train samples based on some cheap similarity to the validation samples are likely to be worthwhile, but depends (of course) on how much training computation you are going to run." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The weaknesses I detail below should all be corrected, but they are all minor, none of them individually or in total would be a good reason to reject the paper. \n\nSECTION 3 PROBLEMS:\n\nAt the beginning of Section 3: “Our goal is to find the optimal subset S ⊆ X” pretty sure you mean subset S ⊆ D there?\n\nI think you are implying that the train set is not necessarily IID with the validation set, but that the validation set is IID with the test set. All I see you say is that the validation set is “correlated” with the test set, which is a really weak and vague thing to say, but if that’s all you want to say, okay, but I will be curious if in your experiments you actually make the Val and Test sets IID. \n\nYou need to define that $T$ represents a training procedure, you just use it without defining it now. 
\n\n“By ranking the data points….” Given a large initial train set D, having to rank the datapoints at cost O(D log D) is not free, hope you guys are taking that into account. Of course, you might argue just touching all D samples is O(D), but that is less relevant if, say, we have an infinite generator of data D (e.g. a real-time reader of the datastream formerly known as Twitter) and an independent (non-ranking) decider of whether each incoming $x$ is worth training on, that is, we shouldn’t have to assume we need to sort at cost O(D log D).\n\n\nI’m uncomfortable as a reader that in (2) you are still defining your objective in terms of the test set. I agree that’s the ultimate goal, but if you actually implemented (2) it assumes knowledge of the test set. By the time you get to (2), I expected you to have switched to the validation set in the stated objective, which is different than the final metric, which should of course than be on the test set. \n\n\nSECTION 4 FEEDBACK: \nYou can cut some of the intro to Sec 4, but please add-in that Lexicon-based and Embedding-based are both strategies that try to select train samples that are similar to the validation samples, whereas Perplexity and Gradient solutions are optimizing for the effect on the model loss.\n\nSECTION 5 FEEDBACK:\nWhy do you assume training on all x is equal? Is that really true (honest question)? My guess is yes due to the very beaurocratic nature of how these models are run, but that’s not always true of machine-learned models, for example, a classic decision tree is much faster to evaluate for some inputs than others (if it has leaves of varying depths). \n\nIn computing C(k), you sum over C_v(x), which I assume is for x \\in D? Please be explicit there about which x you are summing over. And I’m surprised that that cost does depend on $x$ Does C_v(x) really vary so much? Could that not just be \\|D\\| (= size of D) times some cost per training sample? \n\n\nRANDOM: Really appreciate you comparing to just a random sample as a baseline. \n\nMINOR BUT IMPORTANT QUIBBLES: \nAuthors state too unequivocally: “in practice, the total compute budget is\npredetermined: the number of accelerators and their usage hours are allocated in advanced”. That certainly is NOT true in many large companies that are actively training and leading with LLMs. So please hedge and preface that sentence with “In many cases,”. \n\n\nThis sentence didn’t make sense to me:\n“For example work on parameter efficient fine-tuning, targets improving the\nmemory-usage of this stage (Hu et al., 2021).”\n\nTYPO “create an minimal” -> “a minimal”\n\nSince you are citing John 1975, consider also citing the famous and foundational Hart 1968 paper on Condensed Nearest Neighbors, the canonical and classic approach for selecting data for training (summarized e.g. in wikipedia: https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm). 
However, nearest neighbors is such a different machine learning paradigm that if you don’t feel that adds value to this work, you can ignore this suggestion, but given the statement in your paper is “Data\nselection is a foundational approach in machine learning where the objective is to create an minimal\ndataset from a collection of data” the Hart 1968 paper is exactly the classic paper to have done just that.\n\nTYPO “Data selection takes the full training data as input and\nchooses a subset to the train” -> “to train”\n\nThis sentence doesn’t quite parse, please re-write “Instruction-tuned models can handle a variety of possible inputs for downstream use cases as\neither classification or generative model”\n\nThis sentence needs some polishing of singular vs plural:\n“Therefore\nwhile instruction-tuning is not the direct focus of this work, it provide a real-world applications of\ncompute-constrained data selection.”\n\n\n“Assuming we at minimal” -> “at minimum”\n\nEquation (2) can be written all on one line, winning you a bit of precious space. \n\nIn 4.1 you refer to Section 4.1, which is a bit weird and really you can just delete that whole sentence. \n\nDo figure out how to bold the last column title C_forward in Table 1. It can be done (probably \\mathbf{}). \n\nTYPO: Fit of Compute-Performace Relationship -> Performance" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Could the author discuss a real-world scenario to demonstrate how the proposed methods could be applied to guide practitioners?\n2. Are the studied methods sensitive to the choice of model architecture?\n3. How do these methods scale with hardware improvements?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "This paper addresses compute-efficient fine-tuning, which is an important task in training LLM. Extensive simulations are conducted to provide empirical evidence and support the framework." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies a framework considering the practical challenges of training and fine-tuning large language models (LLMs) under computational constraints. It has established a trade-off between achieving better performance using larger data and lowering computational costs by selecting smaller subsets of data. A key takeaway is that simpler data selection methods, such as lexicon-based and embedding-based approaches, often provide a more efficient solution compared with more complex, compute-intensive methods like perplexity-based and gradient-based strategies." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
Although the authors claim that some simple methods such as Lexicon outperform the complex ones such as Perplexity and Gradient, as shown in Figure 1 the complex ones perform quite well, especially under medium and large budgets. It would be more important to study the tipping point where the performance gains plateau. This is the point where further increases in computing resources yield diminishing returns. \n2. It is not surprising to see the tradeoff between performance and data size. The conclusions in this paper are largely empirical and may not generalize well to other situations. The practical value of the parametric fit is limited, as it mainly fits observed data points without clear guidance on how to *extrapolate* results to new scenarios. For example, can the results from smaller models (e.g., 13B) be used to predict outcomes for larger models (e.g., 70B)? Can the parameters estimated from smaller models be reliably transferred to larger models? If practitioners need to run experiments on 70B models to obtain these insights and fit the parametric model, the results may not be useful." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See the weaknesses part." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "1. This paper considers an interesting problem, data selection under computational constraints, and makes the interesting observation that the initial cost cannot be neglected when considering the computational budget." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper considers selecting data for finetuning LLMs under a computational budget. The computational cost is divided into two parts: 1) when using the validation set to evaluate the performance, the validation set will incur an initial cost; 2) training on each sample will cost a fixed amount of computation. The authors propose an exponential distribution model to fit the model performance vs. the training costs for four types of data selection methods: lexicon-based, embedding-based, perplexity-based, and gradient-based. The paper consists of numerical experiments over several models and several tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. (Major) Lack of novelty: although this paper proposes a framework for analyzing the computational cost of each data selection method, it does not provide any new techniques based on this framework. Furthermore, the key observation is not very surprising: the computational cost contains an initial cost when evaluating the validation set, thus the perplexity-based and gradient-based methods are clearly not optimal under a limited compute budget.\n2. 
(Major) Lack of soundness: a) the parametric model is selected to be an exponential distribution and the model is fitted to minimize the squared error, but the choice is never justified by any theoretical analysis or numerical results. The fitted curve is also not very convincing (e.g., Figure 3 and Figure 7). b) The Pareto frontier is never formally defined in this paper nor sufficiently discussed. It's very hard for me to believe that the fitted Pareto curve is indeed Pareto optimal as the points in Figure 8 and Figure 10 exceed the Pareto frontier by a large margin. Also, the fitted exponential curves surpass the fitted Pareto curve considerably in Figure 3, implying that the two curves even contradict each other.\n3. (Moderate) Lack of insights: this paper reads more like an experiment report than a well-motivated paper. The motivation for studying such a computation-constrained data selection problem is not fully supported. The authors just launch a bunch of models, adopt several tasks, and collect all the results without providing sufficient analyses.\n4. (Moderate) The style file seems not to follow the ICLR 2025 template: the lines are not numbered.\n5. (Minor) Typo(s):\n Figure 1 \"using a much larger base model then data selection model\": \"then\" should be \"than\"." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Regarding the notion of utility in Section 3: Is utility here something that is to be minimized, i.e., alternatives with lower utility are preferred over alternatives with higher utility? In the remainder of the paper (expected) model performance is considered, for which clearly higher values are preferred.\n\n- I am not sure whether I understood the greedy data selection introduced in Sections 3 and 4. I am under the impression that all data points are scored individually and afterwards they are ranked according to their scores and selected until budget K is exhausted. Isn't it necessary to do this in an interleaved manner, in order to capture effects like redundancy and the submodularity of utility? Consider the extreme case in which the most informative data point x is repeated K times in the dataset, then we would end up with a selection that contains the same datapoint K times. \n\n- In Figure 2, the plot represents performance of Mid-PPL, while the plot in Figure 3 represents performance of Top-PPL. What is the reason for this discrepancy?\n\n- In Figure 2, what exactly is the dashed line? Shouldn't the Pareto front contain all solutions that are dominating on the dimensions of compute (low) and accuracy (high)? The line is straight, is it something like a linear regression applied to the solutions on the Pareto front?"
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The study is well motivated and its results are of practical importance for finetuning large language models.\n- Empirical findings correspond well with theoretical framework" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors present a study on compute constrained data selection for training large language models (LLMs).\nUnlike preceding works, they do not constrain the size of the training set, but the compute, which is the sum of computational expenditure for data selection as well as LLM training. \nThey compare various existing data selection methods under this setting and come to the conclusion that many powerful data selection methods are almost never compute-optimal due to their computational cost, making cheaper data selection the favorable choice." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The title of the paper is rather broad, while the study is rather specific. \"Compute-Constrained Data Selection\" does not indicate which type of data is selected for which type of task.\n\nMinor remarks:\n- p.3 line 3: \\mathcal{S} \\subseteq \\mathcal{D}, as \\mathcal{X} is not introduced\n- p. 6 bottom: methods -> method" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024computeconstrained,\ntitle={Compute-Constrained Data Selection},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4es2oO9tw1},\nnote={under review}\n}" }, "abstract": { "value": "Data selection can reduce the amount of training data needed to fine-tune LLMs; however, the efficacy of data selection scales directly with its compute. Motivated by the practical challenge of compute-constrained fine-tuning, we consider the setting in which both the cost of selecting data and training are budgeted for. We first formalize the problem of data selection with a cost-aware utility function, and model the data selection problem as trading off initial selection cost for training gain. We run a comprehensive sweep of experiments across multiple tasks, varying compute budget by scaling fine-tuning tokens, model sizes, and data selection compute. These experiments support the proposed model of data selection as a balancing problem between the benefits to training speed and the additional cost of selection. Interestingly we find that many powerful data selection methods are almost never compute-optimal, and that cheaper data selection alternatives dominate both from a theoretical and empirical perspective." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Data Selection", "Compute-constrained", "Scaling Laws." 
] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/dc829d8bd2602f8bd91f2163e9b64184884943e0.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Compute-Constrained Data Selection" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4f4HDfbwY5
CPDD: Generalized Compressed Representation for Multivariate Long-term Time Series Generation
main
Active
Generative Model;Deep Learning;Mode Function;Diffusion Model;Long-term Time Series
generative models
3;5;5;6
4;4;4;4
2;2;2;3
2;2;2;3
2;3;2;3
4.75
4
2.25
2.25
2.5
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. CPDD divides the entire time series into N patches, which might pose a risk of disrupting critical temporal patterns at the boundaries of these patches. Could this potentially affect the model's ability to accurately capture and reproduce these dynamics?\n2. How does the patch length N impact the model performance? Is there an optimal range of N?\n3. Can you provide a detailed comparison of the computational complexity between CPDD and baselines? For example, a table comparing their training and inference time." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The generative modeling approach of decomposing time series patches into mode functions presented in this paper is novel and well-positioned in the literature on time series generation, to the best of my knowledge.\n- The introduction of the Time-series Patch Compression (TPC) module marks a notable innovation in time series modeling. This module provides a robust alternative to the commonly used autoencoder-based compression methods and trend-seasonality decomposition techniques. The exploration of its robustness and generalization is particularly noteworthy\n- Figures 1 and 2 provide a clear illustration of the proposed approach, making the core designs easier to understand." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper aims to address the challenge of balancing the long-term dependencies and short-term features in time series generation and proposes a novel model named Compressed Patch Denoising Diffusion-model (CPDD). The proposed approach first employs a Time-series Patch Compression (TPC) module to decompose time series patches into mode functions, effectively capturing latent representations of both long-term and short-term features. Afterward, a diffusion-based model with a CNN backbone is designed to learn the latent distributions and generate multivariate long-term time series. Experimental results demonstrate that CPDD achieves the SOTA performance in time series generation. Furthermore,  the robustness and generalization capabilities of the TPC module are rigorously verified." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- This paper identifies high computational demands as a limitation of existing methods, but the proposed approach also employs a computationally intensive Transformer-based architecture. Therefore, a detailed analysis of the computational complexity of the proposed CPDD is essential.\n- The visualizations in this paper are generally of high quality. Unifying the font size in Figure 4, especially on the left-side module, to match that of other figures would improve visual consistency and readability." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. How are the short-term and long-term modes decomposed from the patches?\n\n2. What is the rationale for employing the transformer as the encoder of the diffusion model instead of using the transformer directly?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. This paper effectively leverages a patch compression method to capture complex long-term and short-term dependencies in time series data.\n\n2. The authors employ trend-seasonal decomposition to facilitate the diffusion model's ability to learn complex distribution shifts." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a diffusion-based method for time series generation that integrates a patch compression module with trend-seasonal decomposition to enhance generation performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The evaluation experiments presented in the paper are insufficient to convincingly demonstrate the effectiveness of the proposed method. Specifically, more common used evaluation metrics need to be added (like MSE, MAE, .etc), and the selection of baseline methods (both the diffusion-based methods and the transformer-based methods should be compared) and datasets is not comprehensive enough to provide a robust comparison.\n\n2. The formulation of the paper needs significant improvement; the organization and clarity of the text make it difficult to identify the key ideas and contributions. \n\n3. The integration of the proposed patch compression method with seasonal-trend decomposition seems to offer limited novelty, as this combination may be viewed as a relatively minor contribution to the existing body of work in this area.\n\n4. While the paper employs a transformer as the encoder within the diffusion model, it is essential to consider the associated computational costs when making comparisons with baseline methods." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please refer to the Weaknesses." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. Methodology: CPDD is a patch compression method to capture complex long-term and short-term dependencies more effectively with the diffusion model for high-quality samples generation.\n2. Empirical Results: The numerical results presented in the paper are compelling, showing significant improvements over competing \napproaches in terms of generated time series quality. This empirical evidence supports the effectiveness of the proposed method.\n3. Proofs: The author gives a detailed formal proof of the effectiveness of DSConv blocks' structural regularization." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces CPDD, a method for time series generation, which addresses challenges in balance between long-term dependencies and short-term feature learning\u0002. It utilizes a patch Compressed module based on the patch mode decomposition method to obtain the latent encoding of multi-scale feature of time series. It utilizes a diffusion-based model to learn the latent distribution and decode the resulting samples, which achieves state-of-the-art performance in the generation task of multivariate long-time series with efficiency and robustness." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The problem that the article aims to address is confusing; what does \"balance between long-term dependencies and short-term feature learning\" mean?\n2. In line 25 of the article, the author mentions \"efficiency.\" How is this demonstrated in the article? Please compare the model's memory usage and inference time.\n3. In the embedding space shown in Figure 1, what do \"distant\" and \"nearby\" mean? This is quite confusing.\n4. The entire CPDD process is quite confusing. please provide a specific implementation process or corresponding pseudocode?\n5. In lines 340-341 of the article, the author mentions \"Z: latent representation obtained from the TPC Encoder during training.\" Then why is there no loss term for the TPC Encoder/Decoder in Equation 16? Is CPDD an end-to-end or a two-stage process? Please provide a detailed explanation.\n6. What is “L_{AFC}\" in Equation 16? Is there any difference between \"L_{AFC}\" and \"L_{ACF}\"?\n7. The writing issues in the article are evident, with many sentences being difficult to understand, and the challenge that article aims to address is not clearly defined." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1.\tThe results of Baseline presented in Table 1 are inconsistent with the results presented in the original paper, e.g., the Discriminative Score of the Diffusion-TS model under the Sines dataset in Table 1 is 0.326, whereas it is reported as 0.006 in the original paper.In fact, the results of all Baselines in Table 1 are in significant differences. In addition, Table 1 only shows some of the metrics on the performance of time series generation, and the results of the proposed method on both Context-FID Score and Correlational Score are missing. In addition, traditional metrics for time series forecasting, such as MSE, MAE, CRPS, etc., are missing from Table 1, which results in the reader not getting a full picture of the potential limitations of CPDD.\n\n2.\tIn Table 2, in the Predictive Score metric, the model with DSConv removed achieves better performance in the Sines dataset, and the model with TPC removed exhibits the best performance in the Energy dataset. The results of the ablation experiments are puzzling, which may shake the rationality of the structural design of DSConv and TPC." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The main contribution of this paper is to propose a technique that can efficiently compress and represent multivariate long-term time series data by decomposing the patches into pattern functions through which long-term dependencies and short-term features are consistently represented. Specifically, the TPC module learns generic combinations of pattern functions from patches to accommodate various patterns and enables a generic compressed representation of time series data." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper analyses the challenges faced in the time series generation task, including the limited ability of the proposed approach to model long-term dependencies due to cumulative errors, the high computational complexity and time overhead due to the attention mechanism, and the inability to capture both long-term global dependencies and short-term local features. Inspired by the spatial distribution of latent variables modelled by LDM, in order to achieve a balance between long-term dependencies and short-run feature learning in time-series generation tasks, it proposes the Compressed Patch Denoising Diffusion-model (CPDD), where Time-series Patch Compressed (TPC) is designed based on the block pattern decomposition method to obtain multi-scale latent representations. And the diffusion model achieves high quality multivariate long time series generation after decoding by modelling the probability distribution of potential representations." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "This paper is dedicated to present a technique that can efficiently compress and model multivariate long-term time series data, which has important real-world implications. 
The main concerns are as follows:\n\n1.\tAs a model ‘designed for multivariate long-term time series’, the main innovative structures proposed by CPDD, DSConv and TPC, do not have a structure or design aimed at establishing cross-channel connectivity. We believe that a key question is whether the proposed single channel Convolution can establish connectivity across a large number of channels, e.g., the ECL dataset of the electricity Scenarios in the time series prediction contains 321 channels and the Traffic dataset contains 862 channels. Advances in multivariate prediction methods (iTransformer[1], SAMformer[2]) have shown that proper integration of channel management strategies in time series backbones is crucial for discovering univariate dynamics and cross-variate correlations.\n\n2.\tThe lack of advanced baselines leads to the inability to validate the competitiveness of the proposed CPDD. Specifically, only three baselines based on Diffusion are shown in Table 1, and among them, TimeGAN and TimeVAE are published in 2021 and 2019, respectively. The introduction of a wider range of Baselines to compare the performance of the proposed models is expected to be complemented to fully validate the effectiveness of the proposed methods. The referenced baselines can be divided into 4 parts: 1) Models based on pre-trained LLM alignment to TS, e.g. TimeLLM[3]; 2) Pre-trained foundation models on unified time series datasets from multiple domains, e.g. Timer[4], UniTime[5]; 3) Proprietary models trained and tested on specific datasets, e.g. PatchTST[6]; 4) Recent Diffusion-based temporal probabilistic prediction models, e.g. Diffuison-TS[7], mr-Diff[8]. CCPD is expected to be compared with at least one competitive model in each prediction paradigm to demonstrate the soundness of the model design. In addition, we would like to introduce more benchmarks, such as ECL and Traffic datasets with a large number of channel counts, which we believe will help to validate the promising real-world applications of the proposed models.\n\n3.\tThe design of the ablation experiments in this paper is deficient. In addition to DSConv and TPC, CPDD uses other strategies such as Patch Embed and Trend-seasonal Decomposition, yet the ablation experiments presented in Table 2 do not include these structural designs. This raises our concern about the validity of DSConv and TPC.\n\n[1] Liu, Yong et al. “iTransformer: Inverted Transformers Are Effective for Time Series Forecasting.” ICLR 2024.\n[2] Ilbert, Romain et al. “SAMformer: Unlocking the Potential of Transformers in Time Series Forecasting with Sharpness-Aware Minimization and Channel-Wise Attention.” ICML 2024.\n[3] Jin, Ming et al. “Time-LLM: Time Series Forecasting by Reprogramming Large Language Models.” ICLR 2024.\n[4] Liu, Yong et al. “Timer: Generative Pre-trained Transformers Are Large Time Series Models.” ICML 2024.\n[5] Liu, Xu et al. “UniTime: A Language-Empowered Unified Model for Cross-Domain Time Series Forecasting.” Proceedings of the ACM on Web Conference 2024.\n[6] Nie, Yuqi et al. “A Time Series is Worth 64 Words: Long-term Forecasting with Transformers.” ICLR 2023.\n[7] Yuan, Xinyu and Yan Qiao. “Diffusion-TS: Interpretable Diffusion for General Time Series Generation.” ICLR 2024.\n[8] Shen, Lifeng et al. “Multi-Resolution Diffusion Models for Time Series Forecasting.” ICLR 2024." 
}, "withdrawal_confirmation": null }, { "TLDR": { "value": "We develop a novel generative framework, CPDD, specifically for long-term time series generation, exploring the cross-scale feature fusion based on the patch mode decomposition method." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024cpdd,\ntitle={{CPDD}: Generalized Compressed Representation for Multivariate Long-term Time Series Generation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4f4HDfbwY5},\nnote={under review}\n}" }, "abstract": { "value": "The generation of time series has increasingly wide applications in many fields, such as electricity and energy. Generating realistic multivariate long time series is a crucial step towards making time series generative models practical, with the challenge being the balance between long-term dependencies and short-term feature learning. Towards this end, we propose a novel time series generative model named Compressed Patch Denoising Diffusion-model (CPDD). Concretely, CPDD first employs the Time-series Patch Compressed (TPC) module based on the patch mode decomposition method to obtain the latent encoding of multi-scale feature fusion. Subsequently, it utilizes a diffusion-based model to learn the latent distribution and decode the resulting samples, thereby achieving high-quality multivariate long-time series generation. Through extensive experiments, results show that CPDD achieves state-of-the-art performance in the generation task of multivariate long-time series. Furthermore, TPC also exhibits remarkable efficiency in terms of robustness and generalization in time series reconstruction." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Generative Model", "Deep Learning", "Mode Function", "Diffusion Model", "Long-term Time Series" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/7d68e361c1bb90f8f672e3abfb6ca7e4904beffe.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": { "value": "/attachment/cf82fd640d8bdd9290d694dce627efd249cf1aea.zip" }, "title": { "value": "CPDD: Generalized Compressed Representation for Multivariate Long-term Time Series Generation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4fJghLR3hk
Addressing Extrapolation Error in Multi-Agent Reinforcement Learning
main
Active
cooperative multi-agent reinforcement learning;CTDE;value factorization;extrapolation error
reinforcement learning
3;3;6
4;3;3
2;2;3
2;2;3
1;2;3
4
3.333333
2.333333
2.333333
2
-0.5
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "While the paper notes that value factorization mitigates extrapolation errors, how does the method address the potential suboptimality caused by not fully capturing the complexity of joint action interactions among agents? Are there plans to extend the method to better account for agent dependencies and interaction effects?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- Insightful Theoretical Analysis: The theoretical framework helps illustrate the propagation of extrapolation errors and lays a foundation for understanding the importance of stable value estimation in MARL.\n- The proposed methods, including multi-step bootstrapping and ensemble TD targets, are backed by experiments showing improved performance and stability over baseline approaches in MARL settings, demonstrating their utility in practice.\n- The paper highlights the extrapolation errors in MARL and proposes practical solutions to mitigate this challenge, contributing to a better understanding and partial resolution of this important problem." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses the challenge of extrapolation errors in multi-agent reinforcement learning (MARL), focusing on the issue caused by the large joint action space. To mitigate these issues, the authors propose the application of modified multi-step bootstrapping and ensemble TD target techniques, aiming to enhance learning stability and reduce prediction variance. These proposed solutions are supported by theoretical analysis that explains the propagation of extrapolation errors and the importance of ensuring consistent value estimation. Empirical results validate these approaches, demonstrating that they contribute to improved performance and more stable training in various MARL scenarios." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Fundamental Limitations of Value Factorization: Although the paper claims that the success of value factorization methods is largely due to their ability to mitigate extrapolation errors (as noted in the abstract), this mitigation is not comprehensive. The approach simplifies the estimation by focusing on local utilities, but it may fail to capture the full complexity of joint action spaces. An agent’s action value can vary significantly when combined with other agents’ actions, leading to potential suboptimal solutions. 
While this method improves learning stability, as further discussed in Sections 3.1 and 3.2, it does not fully address the diverse combinations and dependencies between agents, which are critical for optimal policy learning in MARL.\n- Incremental and Limited MARL-Specific Solutions: The proposed methods, while addressing the large joint action space, primarily adapt existing techniques like multi-step bootstrapping and ensemble TD targets. These approaches lack innovation and do not sufficiently consider agent interactions, a key aspect of MARL. This results in simplified solutions that may fall short in effectively handling complex, cooperative scenarios, limiting their overall impact and applicability." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Was parameter sharing used in the baselines? \n2. What is the comparison of the parameter counts across the baseline and proposed modifications? How does this scale as the number of agents increase?\n3. Could the authors specify the experiment details as discussed in the weaknesses." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper is well-written, clear and easy to follow.\n- Extrapolation error, especially in online MARL, is a relatively unexplored area. This paper appears to be among the first to address this issue, providing both an analysis and methods to mitigate it.\n- The paper provides a relevant discussion on the propagation of extrapolation error in MARL and how value factorization methods can help reduce it. Building on this analysis, the authors introduce targeted modifications to reduce the bias and variance associated with extrapolation error, with results showing consistent performance improvements across different environments and algorithms. Additionally, ablation studies on ensemble size and the $\\lambda$ annealing parameter are included." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors discuss and provide an analysis on the extrapolation error in Multi-Agent Reinforcement Learning (MARL), and show that value factorisation methods, like QMIX, can help reduce this error. Furthermore, they propose two methods to reduce extrapolation error in MARL, specifically multi-step bootstrapping and using ensembled independent value functions. The authors show that these methods can improve the performance of QMIX, in SMAC, SMACv2 and Google Research Football (GRF) environments, and of on-policy MARL algorithms like MADDPG and FACMAC on SMAC." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The experiment section is not very detailed. 
The authors should provide more information on the experimental setup, including how many seeds were run, the evaluation procedure and the evaluation interval. Furthermore, the main results presented in Table 1 don't include the standard deviation, which is important to understand the significance of the results.\n- Although the authors provide results in three environments, two of them are SMAC and SMACv2, which might share some similarities. It might be more informative to use a different environment to SMACv1. \n- It is unclear if parameter sharing is used in the baseline algorithms. If it is, then the proposed ensemble method would result in many more learnable parameters. This could be a potential source of the improvement in the results, especially since when using smaller ensembles ($M=1,2$) in Figure 5b, performance is worse than the QMIX baseline. It would be important to disentangle the effect of increased capacity and extrapolation error mitigation." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "- Since the paper discusses extrapolation error in MARL, can you provide results demonstrating that your method mitigates extrapolation error compared to the baselines?\n\n- Since the Target Estimation Error (TEE) can be influenced by issues such as overestimation and extrapolation errors, how can you ensure that the issue is indeed extrapolation error due to unseen state-action values backpropagating rather than overestimation due to the max operator [1]?\n\n- Section 3 provides a detailed analysis of QPLEX to illustrate extrapolation error in MARL. However, Section 4 switches to QMIX. Is there a specific reason for this switch?\n\n- See Weaknesses.\n\n[1] Anschel, Oron, Nir Baram, and Nahum Shimkin. \"Averaged-dqn: Variance reduction and stabilization for deep reinforcement learning.\" In International conference on machine learning, pp. 176-185. PMLR, 2017." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- Extrapolation error in MARL is a natural extension of the single-agent case to the multi-agent case, which is reasonable. \n- The improved method is tested across three domains on numerous maps, which is commendable." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper discusses extrapolation error in multi-agent reinforcement learning (MARL). The authors show that extrapolation error is a critical issue in MARL, affecting performance due to propagation from unseen state-action pairs, especially when the action space is large, as is often the case in MARL. Instead of proposing a new algorithm, the authors introduce two existing techniques, annealed multi-step bootstrapping and ensembled TD targets, to mitigate extrapolation error. 
The proposed method is tested across three domains: SMAC, GRF and SMACv2. The results show that the two simple modifications to existing methods can lead to significant performance improvements." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Lack of novelty. Although the paper does not introduce new techniques or methods, I would not consider this a lack of novelty. The lack of novelty in this paper lies in its discussion of extrapolation error, which does not offer anything new. Extrapolation error is a commonly discussed topic in single-agent RL and naturally extends to MARL, which is acceptable. However, the authors do not provide new insights or discussions about challenges specific to MARL. Most of the content is similar to the single-agent case. It feels more like stitching together existing works [1,2,3,4] rather than proposing a new perspective or addressing a new issue induced by the multi-agent setting.\n\n- The writing needs improvement. The core idea is simple and natural, but the logic is messy. There are many statements that are too subjective without any evidence, making the paper less convincing. For example, line 352 states, \"The behavior policy typically originates from the old policy stored in the replay buffer, which may not align closely with the current policy after convergence\". Does this not hold even after convergence? Why? Additionally, some conclusions are not consistent with the results. For example, line 294 states, \"While the mean and standard deviation of $\\lambda$ remain small, the maximum value of $\\lambda$ grows significantly as training progresses, eventually leading to performance degradation.\" However, Fig 2 shows that $\\lambda_{max}$ decreases over time, and the performance increases when $\\lambda_{max}$ increases.\n\n- Too many results are placed in the appendix but are referenced in the main text, especially since some claims are based on the appendix (e.g., lines 355, 407, 411, and 430). This affects the readability of the paper.\n\n- The paper claims that extrapolation error is a major issue in MARL, but the authors do not provide any evidence to support this claim. The two proposed techniques are for bias/variance reduction, which do not seem to be directly related to extrapolation error. There is no evidence that the proposed method mitigates extrapolation error, thus leading to better performance.\n\n- There are no implementation details or parameter searches provided for the baseline methods. Only searching parameters for the proposed method is unfair and may lead to biased results.\n\n- Minor issue. Some learning curves are missing in the last column of Appendix Figure 10.\n\n[1] Fujimoto, Scott, David Meger, and Doina Precup. \"Off-policy deep reinforcement learning without exploration.\" In International conference on machine learning, pp. 2052-2062. PMLR, 2019.\n\n\n[2] Anschel, Oron, Nir Baram, and Nahum Shimkin. \"Averaged-dqn: Variance reduction and stabilization for deep reinforcement learning.\" In International conference on machine learning, pp. 176-185. PMLR, 2017.\n\n\n[3] Rashid, Tabish, Mikayel Samvelyan, Christian Schroeder De Witt, Gregory Farquhar, Jakob Foerster, and Shimon Whiteson. \"Monotonic value function factorisation for deep multi-agent reinforcement learning.\" Journal of Machine Learning Research 21, no. 
178 (2020): 1-51.\n\n\n[4] Kozuno, Tadashi, Yunhao Tang, Mark Rowland, Rémi Munos, Steven Kapturowski, Will Dabney, Michal Valko, and David Abel. \"Revisiting Peng’s Q ($\\lambda$) for Modern Reinforcement Learning.\" In International Conference on Machine Learning, pp. 5794-5804. PMLR, 2021." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024addressing,\ntitle={Addressing Extrapolation Error in Multi-Agent Reinforcement Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4fJghLR3hk},\nnote={under review}\n}" }, "abstract": { "value": "Cooperative Multi-Agent Reinforcement Learning (MARL) has become a critical tool for addressing complex real-world problems. \nHowever, scalability remains a significant challenge due to the exponentially growing joint action space. \nIn our analysis, we highlight a critical but often overlooked issue: **extrapolation error**, which arises when unseen state-action pairs are inaccurately assigned unrealistic values, severely affecting performance. \nWe demonstrate that the success of value factorization methods can be largely attributed to their ability to mitigate this error. \nBuilding on this insight, we introduce multi-step bootstrapping and ensemble techniques to further reduce extrapolation errors, showing that straightforward modifications can lead to substantial performance improvements. Our findings underscore the importance of recognizing extrapolation error in MARL and highlight the potential of exploring simpler methods to advance the field." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "cooperative multi-agent reinforcement learning", "CTDE", "value factorization", "extrapolation error" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/4a4d61c884a5a95dbd3043575a7cde0a4a8ace85.pdf" }, "presentation": null, "primary_area": { "value": "reinforcement learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": { "value": "/attachment/633fecca1e5ac1279481cf68e53d9ccb90d85520.pdf" }, "title": { "value": "Addressing Extrapolation Error in Multi-Agent Reinforcement Learning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4ftMNGeLsz
FedGO : Federated Ensemble Distillation with GAN-based Optimality
main
Active
Federated learning;ensemble distillation;data heterogeneity;generative adversarial network
other topics in machine learning (i.e., none of the above)
3;5;8
4;4;4
2;2;3
3;2;3
2;2;3
5.333333
4
2.333333
2.666667
2.333333
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please refer to the weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper provides a theoretical foundation for the proposed approach, which validates its effectiveness and enhances its credibility.\n2. The paper analyzes communication, privacy, and computational complexity within different scenarios, providing valuable insights for implementing the proposed approach." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposed a novel federated ensemble distillation approach that utilizes generative adversarial networks (GANs) to address the challenges posed by data diversity across clients. Specifically, the proposed approach employs GANs to optimize the weighting of client predictions, thereby improving the quality of pseudo-labels generated during the ensemble distillation process. The paper provides theoretical insights that establish the effectiveness of the proposed method. Comprehensive experiments demonstrate that the proposed approach outperforms existing methods in robustness against data heterogeneity." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. This paper needs to demonstrate the effectiveness of the proposed approach on different model structures, such as VGG and MobileNet.\n2. The effectiveness of the proposed method relies on the quality of the discriminator and generator. The paper needs to conduct related ablation studies.\n3. This paper should conduct ablation studies to analyze the impact of hyperparameters (e.g. $E_s$ and $E_d$) on the effectiveness of the approach.\n4. The experimental settings of the baselines are not clearly stated, and it is important to clarify the fairness of the experimental comparison.\n5. The additional computational and communication overhead introduced by the GAN-based approach may not be suitable for FL scenarios, particularly those with strict resource constraints." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. 
In introduction on page 2, Our main contributions are summarized in the following: \"Federated Ensemble Distillation\" instead of \"Ferated Ensemble Distillation\".\n\n2. In theoretical analysis, near-optimal performance is heavily affected on discriminator performance. I do not understand how to select the discriminator architectures? Can you give me some detailed description?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "This paper demonstrates originality through its innovative integration of GAN-based techniques with federated ensemble distillation. The use of discriminators trained at the client side to optimize the weighting of client contributions during the distillation process is a novel approach that has not been extensively explored in previous federated learning research.\n\nThe method's originality is further enhanced by its theoretical grounding, which employs results from GAN literature to develop a provably near-optimal weighting method. \n\nThe experimental setup is well thought out" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a new approach to address the issue of data heterogeneity in federated learning. By applying Generative Adversarial Network (GAN) techniques to federated ensemble distillation, the paper proposes a near-optimal weighting method that enhances the training process of the server model. Extensive experimental validation demonstrates significant improvements in model performance and convergence speed across various image classification tasks. Moreover, the study provides an in-depth analysis of the potential additional communication costs, privacy leaks, and computational burdens introduced by this method, showcasing its practicality and flexibility in protecting data privacy and enhancing system efficiency." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper claims near-optimal performance based on theoretical justifications rooted in GAN literature. However, these claims might depend heavily on certain idealized assumptions about data distributions and discriminator performance. Real-world deviations from these assumptions could lead to suboptimal performance. The paper does not explain how to select discriminator architectures." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please see Weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper is well-written and easy to follow.\n2. The authors conducted extensive experiments to verify the effectiveness of the proposed method." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes FedGO: Ferated Ensemble Distillation with GAN-based Optimality, for federated ensemble distillation. This algorithm incorporates a novel weighting method using the client discriminators that are trained at the clients based on the generator distributed from the server and their own datasets. The generator distributed from the server can be either off-the-shelf or trained with the unlabeled dataset on the server. The exchange of the generator and the client discriminators between the server and the clients occurs only once before the main FL algorithm starts, resulting in minimal additional overhead.\nExtensive experiments demonstrate significant improvements of FedEDG over existing research both in final performance and convergence speed on multiple image datasets." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "As far as I am concerned, distillation-based FL is data dependent and requires access to an auxiliary dataset derived from publicly available proxy data sources for knowledge transfer, whereas a desirable auxiliary dataset is not always available since its construction requires careful deliberation and even prior knowledge about clients’ private data to achieve satisfactory performance, which is inconsistent with the privacy-preserving nature of FL. In addition, I argue that FedEDG with Pretrained Generator proposed in this paper also has the above-mentioned issues. This is because pre-trained generator needs to be trained on public datasets. Therefore, I remain skeptical of this research direction, even if the paper contains theoretical evidence. Furthermore, if the author wants to convince me, please provide some feasible solutions to address the aforementioned issues.\nI'll raise my score if author can address the above problems." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024fedgo,\ntitle={Fed{GO} : Federated Ensemble Distillation with {GAN}-based Optimality},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4ftMNGeLsz},\nnote={under review}\n}" }, "abstract": { "value": "For federated learning in practical settings, a significant challenge is the considerable diversity of data across clients. To tackle this data heterogeneity issue, it has been recognized that federated ensemble distillation is effective. Federated ensemble distillation requires an unlabeled dataset on the server, which could either be an extra dataset the server already possesses or a dataset generated by training a generator through a data-free approach. Then, it proceeds by generating pseudo-labels for the unlabeled data based on the predictions of client models and training the server model using this pseudo-labeled dataset. Consequently, the efficacy of ensemble distillation hinges on the quality of these pseudo-labels, which, in turn, poses a challenge of appropriately assigning weights to client predictions for each data point, particularly in scenarios with data heterogeneity. In this work, we suggest a provably near-optimal weighting method for federated ensemble distillation, inspired by theoretical results in generative adversarial networks (GANs). 
Our weighting method utilizes client discriminators, trained at the clients based on a generator distributed from the server and their own datasets. \nOur comprehensive experiments on various image classification tasks illustrate that our method significantly improves the performance over baselines, under various scenarios with and without extra server dataset. Furthermore, we provide an extensive analysis of additional communication cost, privacy leakage, and computational burden caused by our weighting method." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Federated learning", "ensemble distillation", "data heterogeneity", "generative adversarial network" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/6b2e8890f4adec7fd264be251ae9a25ed2cc14f5.pdf" }, "presentation": null, "primary_area": { "value": "other topics in machine learning (i.e., none of the above)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/e0904c57b7a83a113c52e17373c63900898442ce.zip" }, "title": { "value": "FedGO : Federated Ensemble Distillation with GAN-based Optimality" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4fyg68nmd7
Scaling Laws for Task-Optimized Models of the Primate Visual Ventral Stream
main
Active
scaling laws;neural alignment;behavioral alignment;computer vision;primate visual ventral stream
applications to neuroscience & cognitive science
3;5;6;6
4;4;3;4
2;3;3;3
2;2;2;2
4;4;3;3
5
3.75
2.75
2
3.5
-0.471405
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. On Figure 6b, that's a beautiful correlation. How far can you take it out? Just eyeballing I'd guess it would get near 0.7. Perhaps a pivot for the paper, to get the positive result I think it needs, would be to focus on this scaled-up model of behavior? Just a thought.\n\n2. Why do you think neural scaling laws are different for different brain regions and also for behavior? This is a complex question of course, and I don't expect a definitive answer, but perhaps there's something interesting here." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The authors completed an extensive sweep through model architectures, compute, and data budgets, in order to give a detailed view of how model scale relates to neural and behavioral brain scores. The key findings here are important (although with debatable novelty): (1) Neural fits asymptote or worsen with scale, (2) behavioral fits are linear with scale (although scale alone appears to be insufficient), (3) the ceiling and form of scaling laws is different for each visual area region that was investigated. Overall, this is a nice capstone on BrainScore, and perhaps is most notable for showing how methods from AI are not always applicable for explaining brain and behavior." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors investigate so-called neural scaling laws for predicting visual behavior and neural activity. \"Scaling laws\" are empirical trends that show a relationship between model scale (e.g., compute used or amount of data used in training) and its loss on a pretraining task. Here, the authors show different functional forms of scaling laws for predicting neural activity vs. behavior, where the latter is far more promising than the former." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The power of scaling laws in domains like language (IMO) is that they imply \"all you need is scale.\" That is, and in the spirit of the bitter lesson, there are no conceptual barriers to achieving a criterion level of performance, only engineering ones. If this were the case in brain science it would be a true world changer. But as this paper (and others which were cited) show, this is not the case. DNNs + scale are not the solution to explaining the variance in brainscore visual system recordings. In that sense I see a large overlap between the findings and result of [1] in which they found a trade-off between ImageNet performance and BrainScore fits. In both cases, these are null results. 
It is great to show this result, but the lack of a direction forward is concerning.\n\nTo drive the point home, in Fig 3, the authors show that training on ImageNet21k (but curiously not WebVision which has more images) leads to better fits. Indeed this would seem to be a scaling law... but the effect size makes it impractical at best: the model maxes out around 0.45 alignment even after all of that data.\n\nFor these reasons I struggle to see how this paper makes a strong contribution to the field. It feels better served as a memo or blog post than a conference or journal paper.\n\n2. I think some of the line fits are overly optimistic. For example, in Fig 1, the neuro line is monotonically increasing. But if I squint and just look at the dots, it looks more like a subtle decrease in fits, on average, as a function of compute. This issue is in many of the plots. This relates to my thoughts in (1) about what this all means and whether or not the findings are novel. See fig 2 ViT behavioral line fits for an example where it's not just for neural data. I am marking down the \"Soundness\" of the paper because of these line fits, but to be honest I don't have any great suggestions about how to improve the fits while maintaining interpretable \"laws\" when you have what look like non-monotonic changes like with the Neural data in Fig 1c.\n\n3. The y limits of the plots should be fixed to one range. It looks like 0-0.7 captures everything. There's too much bouncing around between different ranges in different subplots. Also could you label what dataset the validation accuracy is derived from on plots where you report it?\n\n[1] Linsley et al. Performance-optimized deep neural networks are evolving into worse models of inferotemporal visual cortex." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "* What are the implications of this work, given the limitations already presented in the paper? \n\n* What would be the predictions for a model that closely resembles the visual cortex such as CorNET?\n\n* Given that the paper focuses on scaling, have the authors considered how their scaling laws might apply to or change for models pre-trained on much larger datasets like LAION before fine-tuning on ImageNet? This could provide insights into whether the observed plateaus persist across different pre-training regimes" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is well written. The introduction offers a good view of the literature and it is easy to follow the procedure they use to make the evaluation. The results are clearly presented and explained. It provides a good overview of the current landscape of models in the context of neural and behavioral alignment."
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a way of calculating scaling laws for neural and behavioral alignment with respect to training data and parameter size of models. It offers an interesting overview of the current status of models and their performance on these alignment challenges." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "My main observation about this work is that, while it provides valuable insights and a well-illustrated overview of the current landscape of models and their alignment with neural or behavioral benchmarks, it could benefit from more clarity on how these findings might guide future advancements. The paper mentions previous work with similar findings, as noted in the discussion; however, it would be helpful to understand more concretely how this work can serve as a foundation for the next steps in the field and how scaling laws can truly help scientists develop the next generation of more brain-like models. For instance, what kinds of hypotheses can be drawn from scaling laws that can be tested by adding or removing samples/compute of models being constructed to be more brain-like? \n\nAlthough the limitations section mentions that ‘these functions may not generalize beyond the scales tested,’ this suggests a natural boundary for the impact of these results. Could the authors estimate, based on their scaling laws, what order of magnitude increase in dataset or parameter size might be needed to significantly improve neural alignment beyond the observed plateau?\n\nWhile I understand that this point is mentioned in the limitations section, I feel it is a significant oversight not to include recurrent models. It is encouraging that the paper mentions that inductive bias in the form of convolution seems to yield faster returns, but this feels limited, given that most of the models tested in these benchmarks are much deeper than what might be expected for an architecture resembling the visual cortex. For instance, it would be interesting to see how the scaling laws would apply to CorNet. Is it the case that the more brain-like the model, the easier it is to escape the scaling laws? That would be very impactful for the community. \n\n\nI may have missed it, but did not see mention of self-supervised models or robust models and how the scaling laws operate on models trained on these types of frameworks?"
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* This paper sheds light on the similarity of neural network representations to biological visual representations as a function of model size, compute, and training dataset size. The authors have presented these results in a sound theoretical framework by drawing inspiration from analyses of neural scaling laws. \n* It is super interesting that different areas of the ventral visual stream have varied effects to scaling of neural architectures/datasets. I have not seen this in prior work to the best of my knowledge and this will raise interesting discussions at ICLR.\n* I appreciate that the paper is well-written, the figures are legible and accompanied with reasonably detailed captions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors study the relationship between the size / compute requirement of popular neural network architectures and their training dataset sizes vs alignment to the biological ventral visual stream. The authors analyze the alignment of various architectures to the primate VVS using the publicly available Brain-Score benchmark and claim that (1) scaling models by increasing parameter count produces diminishing neural alignment beyond a saturation point in model size, but behavioral alignment continues to increase with model size, (2) Alignment scales with training dataset size, (3) Higher visual areas in the cortical hierarchy show stronger gains in alignment with respect to scaling." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* **Lacking evaluation of what model behaviors give rise to alignment.** My main point of feedback to further improve this paper is to address what other factors of artificial neural networks contribute to enhancing similarity to biological vision. It is interesting that there exist scaling laws between model / dataset sizes and neural / behavioral alignment, but this has already been documented in prior studies. I urge the authors to further study the qualitative factors (for e.g. sensitivity to the same spatial frequencies that humans are sensitive to) that give rise to enhanced similarity between ANNs and human vision.\n* **Missing evaluation of more recent multimodal models.** There has been a surge in multimodal vision language models that, if evaluated in the same framework established by this paper, would produce really intriguing findings on model scaling and alignment. I encourage the authors to include publicly available large vision language models to increase the impact of their findings, as these VLMs are more widely in use now." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Could there be additional context on the novelty of this work relative to existing literature on model size effects?\n\nIs it possible to control inductive biases more rigorously, either quantitatively or qualitatively?\n\nIn Figure 1, what value does alignment saturation reach?\n\nIs “Utah” in l130 a reference or typo?\n\nWould increasing the benchmark sample size for V1, V2 make the results more robust?\n\nCould the paper benefit from additional discussion on neural versus behavioral alignment, and how better control of inductive biases might enhance interpretability?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The core claim—model size influencing alignment—is well supported by the results.\n\nInvestigating neural and behavioral alignment is a relevant area with potential applications for improving model interpretability and guiding architecture design.\n\nThe study contributes to understanding the role of model scale in alignment, a valuable area for both theoretical insights and practical applications in AI research." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper explores how varying model sizes impact neural and behavioral alignment, seeking insights into the relationship between model architecture and its ability to mimic human-like neural responses and behaviors." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Inductive biases might need better control, either quantitatively or qualitatively, to improve result clarity.\n\nMinor issues: typo at l100 (“ecology”), unclear reference in l130 (“Utah”), and Fig 1 could specify the saturation value.\n\nBenchmark sample size for V1 and V2 is relatively small (315), which may impact result generalizability.\n\nEquation 7’s clarity is limited without referencing equations 8 and 9; introducing C(N, D) = 6ND earlier could help." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We systematically explored scaling laws for primate vision models and discovered that neural alignment stops improving beyond a certain scale, even though behavior keeps aligning better." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024scaling,\ntitle={Scaling Laws for Task-Optimized Models of the Primate Visual Ventral Stream},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4fyg68nmd7},\nnote={under review}\n}" }, "abstract": { "value": "When trained on large-scale object classification datasets, certain artificial neural network models begin to approximate core object recognition (COR) behaviors and neural response patterns in the primate visual ventral stream (VVS). While recent machine learning advances suggest that scaling model size, dataset size, and compute resources improve task performance, the impact of scaling on brain alignment remains unclear. 
In this study, we explore scaling laws for modeling the primate VVS by systematically evaluating over 600 models trained under controlled conditions on benchmarks spanning V1, V2, V4, IT and COR behaviors. \nWe observe that while behavioral alignment continues to scale with larger models, neural alignment saturates. \nThis observation remains true across model architectures and training datasets, even though models with stronger inductive bias and datasets with higher-quality images are more compute-efficient. \nIncreased scaling is especially beneficial for higher-level visual areas, such that small models trained on few samples exhibit only poor alignment.\nFinally, we develop a scaling recipe, suggesting that a greater proportion of compute should be allocated to data samples over model size. \nOur results suggest that while scaling alone might suffice for alignment with human core object recognition behavior, it will not yield improved models of the brain's visual ventral stream with current architectures and datasets, warranting a rethinking in the way we build brain-like models." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "scaling laws", "neural alignment", "behavioral alignment", "computer vision", "primate visual ventral stream" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/2f66a128b13ba33ffb467081bb76b63dea0798fd.pdf" }, "presentation": null, "primary_area": { "value": "applications to neuroscience & cognitive science" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/e38037ae9b496391d5a384a9bf883e777ead8955.pdf" }, "title": { "value": "Scaling Laws for Task-Optimized Models of the Primate Visual Ventral Stream" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4g0PUEAHg0
Transformers Learn Bayesian Networks Autoregressively In-Context
main
Active
transformer;Bayesian network;in-context learning
learning theory
1;3;5;5
5;3;4;3
1;1;3;3
1;2;2;3
2;2;2;3
3.5
3.75
2
2
2.25
-0.636364
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Some questions were raised above. \n\n- In the definition of a BN, the causal order seems to respect the index order. Does the main theorem hold when the ordering is not known, i.e. the variables are permuted uniformly in the samples?\n\n#### Typos:\n\n- L152: will-known -> well-known\n- L198: paramter -> parameter\n- L294: missing citation for visualization\n- L667: nad -> and" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- There has been a lot of growing interest in theoretically studying whether transformers can learn certain structures [1, 2, 3]. The problem this work studies, whether transformers learn Bayesian networks, is very interesting and relevant to the ICLR community.\n\n- The general problem of learning a BN is very tricky (even with transformers) and the work simplifies it nicely using a curriculum approach so only a few variables are introduced at each stage. However, while the idea is novel, this does limit the usefulness of this algorithm (see weaknesses below).\n\n#### References:\n\n- [1] Transformers Learn Shortcuts to Automata\n- [2] Do LLMs dream of elephants (when told not to)? Latent concept association and associative memory in transformers.\n- [3] (Un)interpretability of Transformers: a case study with bounded Dyck grammars" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper theoretically constructs a simple transformer model that learns to sample from a Bayesian Network (BN) from in-context samples. A BN is an ordered set of variables with a causal ordering among them that satisfies various conditional probabilities. The paper shows that for a BN of bounded maximum indegree, a simple 2-layer transformer can approximate the conditional probabilities and generate new samples. The proof is simple and basically constructs positional embeddings that mimic the parent structure of the BN and then applies MLE. Experiments are conducted to validate the theory on simulated BNs and also probe the number of layers needed. The target audience are people interested in theoretical machine learning." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- While the result is nice to have, it's unclear how the main theorem of this work compares to results from existing works on universal approximation capabilities of transformers.\n\n- Moreover, it's also unclear whether gradient descent or standard approximation methods used to learn such models will extract some sort of similar structure.
The authors state this in their conclusion, however this is a relevant weakness of this work and limits its utility.\n\n- The curriculum setup sounds interesting, however it seems to require apriori knowledge of the causal order and this may not be available in practice.\n\n- While experiments on simulated data validate the theory, it would also be nice to have some validation on real-life data (even on few variables)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "+ Can you please clarify the problem formulation in this paper? I don't think it's accurate to say that this paper is about Bayesian network learning. However, I'd like the authors to reflect on this aspect, and clarify this point.\n\n+ Can you elaborate significantly on how the Transformer is trained, including details about: is it a causal decoding Transformer? Is it trained to minimize the negative log-likelihood of the next variable given previous ones? Include all details that can help a reader clearly understand the training objective.\n\n+ Can you shed light on the dimensions of the input $X$ and in particular, clarify the apparent mismatch in dimensions of the last row against the previous rows?\n\n+ Can you clarify how the output of the final linear layer is transformed to produce a proper vector of probabilities for a categorical distribution?\n\n+ Can you clarify the missing details about the empirical studies that I noted in the \"weaknesses\" section?" }, "rating": { "value": 1 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "This paper investigates whether Transformers are capable of estimating multivariate discrete distributions in-context. In and of itself, this research question has not been studied yet, to the best of my knowledge." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper considers the ability of Transformers to estimate in-context the conditional probabilities of categorical variables. Theoretically, the paper seeks to prove that for any joint distribution over categorical variables and an ordering over them, there exists a two-layer Transformer that can represent the true conditional probabilities of each variable given those that are earlier in the ordering. Empirically, the paper considers experiments on synthetic data where Transformers are trained on samples from different Bayesian networks that all come from some family of graphs. The paper compares the probabilities estimated in-context to the ground truth as well as those estimated via naive Bayes and Bayesian inference, finding trends that suggest that Transformers have the capacity to estimate conditional probabilities in-context." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "+ A key flaw with this paper is a misrepresentation of the problem: this is not a paper about learning Bayesian networks in-context; rather, it is a paper about whether Transformers can estimate conditional probabilities of discrete variables in-context. In lines 106-107, where the problem is introduced, note that the Bayesian network that is specified is not the true Bayesian network (BN) that defines the joint distribution of the variables: it is simply a factorization of the distribution via chain rule given a particular variable ordering. This factorization is generic and valid for any distribution. By contrast, the __true__ BN that underlies a distribution can entail far more conditional independences than is given by the chain rule. Even if this paper was about learning BNs, BNs are anyways not identified by observed data: it is known theory that multiple BNs entail the exact same set of conditional independences.\n\n+ Following up on the previous point, the paper can be interpreted as asking: can Transformers estimate joint distributions of discrete variables in-context? The technical result is in service of showing that true conditional probabilities can be captured by the hypothesis class of two-layer Transformers. But, the significance of this finding lacks context: what is the broader implication if Transformers can estimate multivariate discrete distributions in-context? What questions will this help us answer in the broader context of machine learning? The authors need to properly contextualize the questions and finding in their paper. \n\n+ The technical setup lacks clarity about details that are essential to a paper about Transformers and in-context learning: what is the precise objective by which the Transformer is trained? Is it a causal decoding Transformer trained to minimize the negative log likelihood of the next categorical variable given the previous ones in a particular sample? Details about how the Transformer is trained are completely missing. Further, for completeness, the authors should also properly define every piece of notation like $0_{dm}$ and $\\mathbf{e}_{N+1}$ -- I imagine these define a matrix of 0s and the $N+1$-th standard basis vector, respectively? But readers shouldn't have to interpret key pieces of notation. \n\n+ There are technical details that do not appear to be correct. For example, in Eqn. 3.2 that defines the input matrix $\\mathbf{X}$ to the Transformer, the dimensions do not make sense: each $\\mathbf{x}_{ij}$ entry is a $d$-dimensional one-hot encoding, as stated in in line 117, but the vector $p$ is $(M+1)d$-dimensional according to Eqn. 3.1. Thus, the last row of the input $\\mathbf{X}$ seems to have more columns than the rows above. Another example is in line 190: to specify the output of the model, the authors indicate $\\mathbb{R}^d$ and define operations that would produce a $d$-dimensional real-valued vector, but for categorical variables, we need to output vectors in the $d$-dimensional simplex. The composition of the $\\mathrm{Read}(\\cdot)$ and $\\mathrm{Linear}(\\cdot)$ functions would not produce vectors that are probabilities that sum to 1, as needed for evaluating the log likelihood or for sampling discrete variables.\n\n+ The empirical studies also lack clarity about key details. For example, in lines 261 and 262, the phrase \"the probability distribution of those graphs ...\" is not parseable. 
What is this referring to? Second, the methods that are compared with a Transformer -- naive Bayes and Bayesian inference -- are significantly lacking in clarity. How is naive Bayes being applied to the density estimation problem considered in this paper? Bayesian inference is not a model, it is a method, so what is the underlying model on which Bayesian inference is applied and what is the posterior being inferred? These details are not clear from the paper and limit the ability of a reader to make sense of the empirical findings." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Could you specify “naive bayes” and “bayesian inference” in main text?\n2. Could you provide whether the trained transformers implement an algorithm proposed at Theorem 4.1 and Section 6?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The paper proposes a theoretical construction that transformers are capable to capture Bayesian networks in context.\n2. The paper presents a well-defined experimental framework to explore how transformers learn Bayesian networks in context, which could inspire further research.\n3. The paper compares prediction accuracy by varying the variables and types of Bayesian networks, providing a detailed description of qualitative differences among various instances." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The main goal of this paper is to demonstrate how transformers can learn Bayesian networks in context. The paper provides a theoretical proof showing that transformers are capable of modeling Bayesian networks in context. Additionally, The paper provides an evidence that transformers trained on such tasks can make Bayes-optimal predictions for new samples in an autoregressive manner." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper does not provide evidence on whether the trained transformer implements the algorithm proposed in Theorem 4.1 and Section 6. Other previous works on similar topics utilizes attention pattern analysis and causal studies through ablations.\n2. The paper lacks explanations of terms like “naive bayes” and “bayesian inference” and does not clarify how the accuracy of these algorithms is calculated in the accuracy plots in Figures 2, 4, and 6.\n3. The paper does not address the robustness of the results under more realistic settings, such as with positional embeddings." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See weaknesses part." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "- The paper follows a relevant and fruitful line of work studying in-context-learning (ICL) on controlled settings.\n\n- The paper proposes the interesting benchmark of Bayesian networks to study ICL capabilities of transformers.\n\n- The paper provides a theoretical construction of a simple 2-layer transformers capable of estimating conditional probabilities of Bayesian networks and of generating a new sample autoregressively from the inferred graphical model." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper studies the problem of in-context learning in transformers. In particular, it focuses on whether transformers are able to learn Bayesian networks in-context. In this setting, the model, given N different realisations of a specific graph and a query sample, is tasked to predict the probability distribution associated with a missing variable (see construction in Eq. 3.2). The assumption is that, if the model is able to infer the conditional probabilities associated with the Bayesian network, it can then use them to predict the value of the missing variable. In addition, once the model has captured such conditional probabilities, it is in principle able to generate new samples from the inferred graphical model (Algorithm 1). \n\nThe authors first provide a theoretical construction for a two-layer transformer which is capable of estimating the conditional probabilities of the Bayesian network according to the context, and autoregressively generating a new sample according to the Bayesian network with estimated conditional probabilities (Theorem 4.1 and Lemma 6.1 and Lemma 6.2). \n\nThe authors also conduct an empirical analysis to show the performance of trained transformers (with up to 6 layers) on the task of in-context learning three graph structures, namely a \"general graph\", a \"tree\" and a \"chain\". The performance of the model is studied by varying the number of in-context examples seen at training time and evaluating the model on different number of test in-context samples. The results show some evidence that transformers are capable of learning Bayesian networks in-context." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- I find the notation quite confusing and the way the paper is organised makes it a bit hard to follow. For example, looking at Algorithm 3.1, it seems that the input to the model is of size (2M+1)d x (N+1) where the N+1 factor takes the query into account, while it seems the Read Function takes as input a tensor of size (2M+1)d x (N). 
In addition, I found it a bit hard to follow the description of how the training and test datasets are generated (paragraphs Datasets and Metrics in Section 5.1). Could the authors clarify these points?\n\n- As far as I understand, the paper focuses on only three different Bayesian networks with a fixed structure (shown in Figure 1). If my understanding is correct, I believe more varied and diverse graph structures should be considered to better support the author's thesis. Can the analysis be extended to other graphs? \n\n- On a related note, why are only binary variables considered? It would be interesting to extend the analysis to variables taking values from a vocabulary of a certain size. \n\n- In section 5.1 (model paragraph), the dimensions of p and p_q change compared to Eq. 3.1 where they were defined. Could the authors please clarify?\n\n- From the experiments in section 5.3, a one-layer transformer seems to be enough. This result contrasts with the theoretical construction which in principle would require a 2-layer model. Could the authors better elaborate on this point? \n\n- Several typos across the manuscript. See, for example, the missing link in the \"Curriculum Design\" paragraph (\"A visualization of the curriculum is in XXX\")" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024transformers,\ntitle={Transformers Learn Bayesian Networks Autoregressively In-Context},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4g0PUEAHg0},\nnote={under review}\n}" }, "abstract": { "value": "Transformers have achieved tremendous successes in various fields, notably excelling in tasks involving sequential data like natural language processing. Despite their achievements, there is limited understanding of the theoretical capabilities of transformers. In this paper, we theoretically investigate the capability of transformers to autoregressively learn Bayesian networks in-context. Specifically, we consider a setting where a set of independent samples generated from a Bayesian network are observed and form a context. We show that there exists a simple transformer model that can (i) estimate the conditional probabilities of the Bayesian network according to the context, and (ii) autoregressively generate a new sample according to the Bayesian network with estimated conditional probabilities. We further demonstrate in extensive experiments that such a transformer does not only exist in theory, but can also be effectively obtained through training. Our analysis showcases the potential of transformers to effectively learn complicated probabilistic models, and contributes to a better understanding of the success of large language models." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics."
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "transformer", "Bayesian network", "in-context learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/0921097505a503ef13858798ca5c8766dc28ab12.pdf" }, "presentation": null, "primary_area": { "value": "learning theory" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/7a10bf5dfa27293719f67318dff774ca7127f69b.zip" }, "title": { "value": "Transformers Learn Bayesian Networks Autoregressively In-Context" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4gaySj8kvX
Accelerating Goal-Conditioned Reinforcement Learning Algorithms and Research
main
Active
Deep Reinforcement Learning;GPU-accelerated Physics Simulators;Contrastive Learning;Unsupervised Reinforcement Learning
reinforcement learning
3;3;8;8
3;4;4;4
3;3;4;3
2;1;4;3
3;3;4;3
5.5
3.75
3.25
2.5
3.25
0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Figure 4 typo: \"though DPO policies remain at the goal for a shorter\"" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The library is well-motivated, as speeding up RL leads to better experimentation\n- The authors implement many environments and energy functions\n- The scale, energy function, update-to-data ratio experiments are interesting and useful for future work on GCRL" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose a new library for goal conditioned reinforcement learning (GCRL). Unlike prior work, their method runs end to end on the GPU, making training faster. They implement 8 environments in JAX, as well a few algorithms and various objectives. Then, they evaluate existing methods across a number of axes, investigating replay ratios, model sizes, and energy functions." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The library appears like a \"one-and-done\" sort of thing that will not be maintained after publication. In RL there is already a large graveyard of abandoned RL projects that no longer run and provide no value to the community. Given this fact, I can only review the current state of the library. In its current state, I think the library needs a bit more work before publication. Please see https://docs.cleanrl.dev for an example of what I think a modern RL library should look like.\n\n- There is no documentation, it is unclear:\n - Which approaches are implemented\n - How to use these approaches\n - How to add new models\n - The structure of the codebase\n- There are no unit tests, so the correctness of the code (and the ability to maintain the code as time goes on) is unclear\n- The train script is solely for the authors, relying on a pre-existing conda environment\n- There are no tutorials beyond a single bash command that runs a parameter sweep\n- The library relies on wandb, and does not seem to run without a wandb account\n- As far as I understand, the authors only implement 3 algorithms, and I would like to see more than three baselines so that we can do proper comparisons" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- What is your support plan going forward with JaxGCRL, are you planning on adding new environments or algorithms?\n- It seems like JaxGCRL is very much focused on brax-type environments, is there a part of goal conditioned RL research that potentially focuses rather on discrete action environments that you are leaving out?\n- What about other non-contrastive GCRL algorithms? Are you planning on adding support for those?\n\t- Relatedly, how easy would it be for someone else to implement a new GCRL algorithm to fit within your framework?\n\t- And how easy is it to add another goal conditioned environment, based on an existing JAX environment? For instance, minigrid or craftax or xland minigrid, etc?\n- In the maze, for instance, can you dynamically, within the JIT, change the maze layout, or does it have to be statically known at compile time?\n- Is there an easy way to transfer a JaxGCRL agent to existing environments?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Adding even more environments and algorithms to the JAX ecosystem is great, especially for goal-conditioned RL which is lacking in this space.\n- The proof-of-concept experiments demonstrate what this library can allow, namely, more thorough investigation of design decisions in goal-conditioned RL\n- The writing and motivation is quite clear." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces JaxGCRL, a codebase that contains environments and a scalable goal-conditioned RL algorithm, all implemented in JAX. This allows researchers to train GC agents much faster than before, making these experiments more accessible. This work also analyses several design decisions of contrastive RL algorithms, enabled by a fast simulator & algorithm implementation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- I can't see any major weaknesses, apart from the limited number of environments, although 8 is pretty respectable." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "* In Sec 5.3, why is the contrastive objective only evaluated on part of the 8 environments? Similar question in sec 5.6 for examining different UTD ratios.\n* In Fig 1. Are the num_actors same for the JaxGCRL and CRL?\n* How do you define if the agent is within goal's proximity?" 
}, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "* The JaxGCRL codebase is significantly faster than the original codebase. \n* The proposed baseline consistently outperforms its counterpart in all 8 environments, demonstrating its stability from simple to complex environments.\n* The performance of different design choices is extensively tested and the result metric is easy to interpret." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper provides a JIT-compiled codebase with vectorized environments that can speed up training and the iteration of new ideas on goal-conditioned reinforcement learning problems.\nIn addition, it provides a stable baseline algorithm for goal-conditioned reinforcement learning problems that is benchmarked in 8 diverse continuous environments." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper mentions it leverages the power of GPU-accelerated simulators, but by comparing against the brax training code under https://github.com/google/brax/tree/main/brax/training, there are some similarities in the training code as well, and this is not mentioned in the paper." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See weaknesses.\n\n[1] Brax documentation. https://github.com/google/brax?tab=readme-ov-file#one-api-four-pipelines\n\n[2] Gymnax: A JAX-based Reinforcement Learning Library. Robert Lange. https://github.com/RobertTLange/gymnax\n\n[3] Lu, Chris, et al. \"Discovered policy optimisation.\" Advances in Neural Information Processing Systems 35 (2022): 16455-16468. https://github.com/luchris429/purejaxrl" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper has several significant strengths.\n\n- Although not novel, JAX implementations are to be commended. They improve research iteration speed significantly.\n- The paper is very well written. The authors communicate their results clearly and unambiguously.\n- The authors evaluate using the inter-quartile mean and bootstrapped confidence intervals. This is more sound than using learning curves etc.\n- The authors provide a number of ablations and experiments that explain the performance of their implementation." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors introduce JaxGCRL, a benchmark and framework for evaluating goal-conditioned RL algorithms based on contrastive learning.\nThey re-implement a number of goal-conditioned tasks from prior literature and evaluate their implementation on them.
\n\nThey then evaluate the effect of different losses, more samples, and larger networks on their implementation. They demonstrate that their Jax-based implementation is significantly faster than previous libraries, accelerating future research." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "However, I have a number of issues with this paper, which is why I recommend rejection. \n\n- The authors claim that their setting is challenging, but do not effectively demonstrate that this is the case. The authors demonstrate that by using a bigger network (1024 layer width and depth of 4) and layer norm, the performance significantly improves. They also run experiments where they train for significantly more interactions. However, as best I can tell (it is not always clear which network is used in which experiment), the authors never run their biggest, highest performing network for 300M steps on all the tasks. The authors do not pitch their work as focussing on sample efficiency, and therefore I am not sure why their evaluation framework should be compelling if the tasks can be solved by scaling up networks and using more samples. If the authors can provide a demonstration that this does not satisfactorily solve their benchmark, **I will raise my score**. However, without this demonstration, I do not believe that the experimental insights and JAX implementation are enough to warrant acceptance.\n- I am confused about the experiments concerning the update-to-data ratio (UTD). Given a fixed step budget, doing fewer or more updates is a pure trade-off. You can do fewer, less noisy updates, or do more, noisier updates. This occurs all over RL, for example when choosing the number of parallel environments to use in PPO. I am not sure why a high or low number of updates would be beneficial, or why this quantity would be interesting to examine.\n\nI also have a number of more minor points:\n- The authors claim that they cannot directly compare brax and mujoco because brax uses a different physics engine, but the MuJoCo physics engine has been available in brax for a while now [1] -- what exactly is the issue here? \n- The discussion of related work on jax-based environments is missing some work. Gymnax [2] and PureJaxRL [3] were both important landmarks in the use of and benefits of JAX in RL and warrant inclusion.\n- The authors should probably rephrase line 117, which begins with \"In addition to reducing CPU-GPU data-transfer overhead...\". While implementing an environment in JAX *does* do this, there are also significant other factors such as JIT compilation and the resulting operator fusion and the ability to use more vectorised environments than a typical CPU + pytorch approach that lead to the significant speedups. \n- A number of the papers listed in the appendix have incorrect citations or are missing authors.\n- Line 1032 in the appendix contains a typo (lenght -> length)" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "This paper presents JaxGCRL, a high-performance codebase and benchmark designed for self-supervised goal-conditioned reinforcement learning, offering faster training and promoting more efficient RL research."
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024accelerating,\ntitle={Accelerating Goal-Conditioned Reinforcement Learning Algorithms and Research},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4gaySj8kvX},\nnote={under review}\n}" }, "abstract": { "value": "Self-supervision has the potential to transform reinforcement learning (RL), paralleling the breakthroughs it has enabled in other areas of machine learning. While self-supervised learning in other domains aims to find patterns in a fixed dataset, self-supervised goal-conditioned reinforcement learning (GCRL) agents discover *new* behaviors by learning from the goals achieved during unstructured interaction with the environment. However, these methods have failed to see similar success, both due to a lack of data from slow environment simulations as well as a lack of stable algorithms. We take a step toward addressing both of these issues by releasing a high-performance codebase and benchmark (`JaxGCRL`) for self-supervised GCRL, enabling researchers to train agents for millions of environment steps in minutes on a single GPU. By utilizing GPU-accelerated replay buffers, environments, and a stable contrastive RL algorithm, we reduce training time by up to $22\\times$. Additionally, we assess key design choices in contrastive RL, identifying those that most effectively stabilize and enhance training performance. With this approach, we provide a foundation for future research in self-supervised GCRL, enabling researchers to quickly iterate on new ideas and evaluate them in diverse and challenging environments. Code: [https://anonymous.4open.science/r/JaxGCRL-2316/README.md](https://anonymous.4open.science/r/JaxGCRL-2316/README.md)" }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Deep Reinforcement Learning", "GPU-accelerated Physics Simulators", "Contrastive Learning", "Unsupervised Reinforcement Learning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/876fa432ff2d5b2c40a1ac62fcd2ba083cc9b464.pdf" }, "presentation": null, "primary_area": { "value": "reinforcement learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Accelerating Goal-Conditioned Reinforcement Learning Algorithms and Research" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4hFT4rfG40
Plug-and-Play Controllable Generation for Discrete Masked Models
main
Active
Discrete Masked Models;Controllable Generation;Plug-and-play
generative models
1;3;5;6
4;3;4;3
1;2;3;4
1;1;2;2
1;3;4;3
3.75
3.5
2.5
1.5
2.75
-0.390567
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "N/A" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "I have several questions regarding the content of the paper:\n* What was the metric used for computing/conditional generation when optimizing the helical fraction?\n* How is the reward function on page 8 derived? Additionally, why are the intervals for the metrics sometimes closed (i.e. instability with $A = [0, 40]$) and sometimes unbounded (i.e. helix % with $A = [0.8, \\infty)$), and in what settings is bounded/unbounded preferable?\n* Are the helical fractions correct in the protein experiment? In figure 3, the bottom two proteins seem almost identical, yet one has a helix fraction of 0.78, and the other 0.44. This does not seem quite correct." }, "rating": { "value": 1 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "* The paper tackles a broad category of problem, namely plug-and-play conditional generation using discrete masked models without the need for fine-tuning. Additionally, they lay out in which settings their methodology would be advantageous (for example, they indicate that this method is useful when the masked model is much more expensive to evaluate than the reward function).\n* The authors make a good effort at making the paper reproducible by including source code of the algorithm (as well as detailed algorithm descriptions) in the appendix.\n* Figure 1 is quite good and greatly facilitates understanding of the proposed methodology. In addition, the paper clearly describes the problem which they aim to solve, subsequently provides a concrete approach which makes the problem tractable, and performs some preliminary empirical validation." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper tackles the problem of performing conditional generation of discrete structures via masked generative models. They propose a general-purpose approach for optimizing arbitrary objective functions during the generation process. Subsequently, they provide several simplifications and concrete modelling approaches to make the problem tractable and computationally efficient. Finally, they apply the methodology to a toy problem as well as a protein generation task." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "__Theoretical Concerns__:\n\n* Several key aspects of the paper lack a theoretical justification or are not derived in a principled manner. For example, the proposed reward equation $r(x) = \\exp\\left({-\\sum w_i \\text{dist}(m_i(x), A_i)^{\\alpha_i}}\\right)$ is provided with no theoretical grounding or explanation. As best I can tell, the definition of the sampling distribution $q(x) = Z^{-1} r(x)p(x)$ would require $r(x) \\geq 0$ in order for $q(x)$ to be a valid distribution.
However, this is not mentioned, and many alternative reward functions could be used. I would like to see a more detailed explanation of why this reward function was chosen (either theoretical justification or empirical results).\n* Similarly, the use of the mean-field approximation and importance sampling present several practical challenges which are not addressed. In the case of importance sampling, the results are heavily dependent on samples obtained from regions of high density, and thus may require many monte-Carlo samples if the proposal distribution is far from the true distribution. Furthermore, the mean-field-approximation assumes that the probabilities of the masked inputs are independent conditioned on the observed values. This is clearly not the case for domains such as images, which exhibit strong local structure. The paper would be much improved with additional analysis of the performance of the proposed methodology when the assumptions are violated and/or on larger-scale problems more representative of real-world use.\n* The authors mention that the proposed method is beneficial when the complexity of querying the masked model is much higher than evaluating the reward function. Unfortunately, this is only true for trivial objective functions. For example, protein structures are typically optimized for a complex objective that is computed by another deep learning model (i.e. predicting biological activity, folding structure, etc.). This calls in question the applicability of the method to wider categories of problems, as most problems of interest will not have a closed form/cheap objective function.\n\n__Experimental Concerns__:\n* In terms of the experimental validation, the experiments performed do not provide sufficient evidence that the methodology works as intended. First, the experiment using the toy problem uses a uniform masked model with a linear objective function. As expected, the proposed approach performs well given that the problem is explicitly formulated satisfy the mean-field approximation and importance sampling schemes. No attempt is made to characterize how the method performs as assumptions are violated. Furthermore, the protein experiments are conducted using objectives which are much too simple. GRAVY (Grand Average of hydropathy) is a simple sum of values per individual amino acid. Similarly, the instability index (Guruprasad et al., 1990), consists of summing values from a lookup table for pairs of amino acids. These objectives are simple enough that the assumptions of MFA and importance sampling are not violated, but are not representative in terms of computational costs or complexity of typical protein design tasks. Finally, an experiment is performed to optimize the helical fraction of the peptides. The objective used is not clearly defined in the paper, but validation is performed using ESM3. Consequently, if ESM3 is used for the helical fraction objective, then the objective would not be cheap to evaluate, and the initial assumptions made by the paper are violated. Overall, the paper would benefit from more extensive and principled empirical validation in settings more representative of how the methodology would be used in practice.\n* Another aspect of the experimental results is that both the toy problem and the protein design task consist of relatively simple 1-dimensional discrete structures. 
I would need to see this methodology applied to more complex discrete structures such as 2D image generation or graph structures (such as per-atom molecule design) in order to validate some of the wider-scope claims made.\n* In terms of presentation, many of the figures would benefit from more detailed captions to clearly present what is being shown. For example, figure 2 seems to imply that additional monte-carlo samples enable the algorithm to achieve a high degree of success when optimizing the objective, however this is only briefly touched upon in the main text, and not at all addressed in the caption. Additionally, figures 5/6 are quite visually crowded and hard to parse. As these figures occur in the appendix, the authors could take more space to make sure that the results are clearly and unambiguously presented.\n\n__Contribution Concerns__\n* The main contribution of the paper seems to be the introduction of the sampling distribution $Z^{-1} r(z)p(x)$, and then using MFA and importance sampling to sample from this distribution. This is not a novel methodology and is well known in various Bayesian settings. To accept the paper, there would need to be a more significant theoretical contribution. Additionally, there exists pre-existing plug-and-play samplers for continuous diffusion models, this paper extends plug-and-play samplers to discrete masked models, and this does not present a significantly novel framework for conditional generation." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1) Why did you choose to look only at protein sequences and not natural language for controllable generation tasks?\n2) Is there a relationship between the number of MC samples needed and the mean field approximation or the remasking schedule? For example, if gamma is too high (or too low), does it take more samples to achieve high final reward?\n3) What are some of the limitations of this model w.r.t. the reward design? What characterizes a controllable generative task for which reward design / success would be easy / hard?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "Overall the paper is very well written. The motivation of the problem, controllable discrete masked model generation without training, is good, as this implies flexible controllable generation without additional computational overhead of training for each controlled generation task. The theory appears to be sound to me without any errors, arriving at the mean field approximation with importance sampling, which seems to be a reasonable approach and yields decent results on both the toy task and protein generation tasks. The paper does an excellent job presenting the work as close to diffusion models, which makes the theory sections easy to read. 
The experiments for the most part are well motivated and the results do support the usefulness of the approach to some controllable settings." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This model presents a method for controllable generation given a pre-trained (discrete) masking model without further training. Given a reward function, the method iteratively applies masking and remasking along with a mean-field approximation and importance sampling to perform controlled (e.g., conditioned on a class variable) generation in a \"plug-and-play\" manner. The work lays out the grounding theory and connections to (continuous and discrete) diffusion models, motivates the approach, and demonstrates the approach both on toy sequential data and protein generation (inpainting and class conditioned) tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "No limitations are presented in the paper, and it seems like there may be some worth discussing. One is reward function design, as it's unclear whether some tasks may have reward functions that are difficult to design, or whether success depends heavily on the choice of reward function. The next is that the number of Monte Carlo samples seems to be quite high; the performance in figure 10 seems to indicate that even at 10k samples the model is still improving. There really should be more of a discussion about this limitation, which I believe is likely due to either the mean field approximation or the remasking schedule, but neither of these limitations / issues are discussed to any significant degree. Finally, I wonder why we're only looking at protein sequence generation as the task: why not also look at some natural language applications?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "Q: One of the motivations is that by setting r to be the p(y|x), one can sample from the Bayesian posterior p(x|y). How accurately can this method sample the Bayesian posterior and what kind of Bayesian inference problems can it be applied to?\n\nThe reasoning for my score is that I find the claims of effectiveness and versatility lack evidence.\n* Effectiveness: The experiments have no baselines, therefore it's difficult to evaluate if the method is effective or not.\n* Versatility: The method is only evaluated on a single domain (not counting the toy example).\n\nI am willing to increase my score if the authors argue or provide further evidence in support of these two claims in the rebuttal." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper presents a well-justified method for conditional sampling in the presence of a reward function. 
It details the assumptions it makes and it gives an intuition when/why someone would use this method for conditioning.\n\nIn terms of novelty, SIR is not novel, but its application to masked generative models for controllable generations is. I am not aware of other works that use this idea for masked generative models.\n\nThe paper is very well written and easy to understand. The motivation is clear, the method is well-explained and detailed. Figure 1 and Algorithm 1 give an excellent overview that makes it easy to implement.\n\nThe experimental results on protein generation are extensive. They show convincing results on two benchmarks: solubility and alpha-helix percentage. Furthermore, they include a qualitative assessment of protein in-painting." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a method for generating conditional samples from a masked generative model. Assuming the existence of a reward model, the method draw approximate samples from the unnormalized density r(x)p(x) without requiring the generative model to be retrained.\n\nThe method applies the Sampling Importance Resampling (SIR) trick to obtain approximate samples from the target distribution over the course of the generative process.\n\nExperimental results demonstrate the concept on a toy problem as well as showcasing impressive results on a protein generation benchmark." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "A main weakness of the paper is the experimental results. The work is motivated by the versatility of the approach: they claim strong performance across multiple domains. However, experimental results only include protein generation benchmarks. There are not experiments on text, images or audio with the masked models that are discussed in the introduction.\n\nRegarding the protein benchmarks, there is no baseline to compare against and there are no ablation experiments.\n* Baselines: It would be good to see how the method compares to naive fine-tuning approaches (while acknowledging that the proposed method is much lighter computationally).\n* Ablations: The method does not have many hyperparameters to set, but it would be good to see how the generation quality depends on the number of Monte-Carlo samples used." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See Weaknesses" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "* The paper is generally well-written, and clear\n* The method is relatively simple, and easy to implement\n* The method only requires an unconditional model, and can be used to controllably generate from any conditional distribution given its corresponding reward function" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a method to enable controllable generation given any unconditional generative model and a reward function as conditioning signal. This is done through computing importance weights using Monte-Carlo estimates and evaluates resulting samples using the given reward function. The authors demonstrate the effectiveness of their method using a toy dataset and in the context of conditional protein generation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* The novelty is relatively low, as importance sampling has been very well studied in prior works. Although to my knowledge, I have not seen it applied it in the context of controllable generation, the experiments do not well demonstrate the effectiveness of the proposed method\n* Core experiments are on relatively easy (low-dim) distributions, and it is unclear as to how this method scales. How well does the method work for more complex distributions, e.g. for images, longer sequence proteins, etc? Do you need significantly more Monte Carlo samples?\n* The method quite heavily relies on a good reward function -- which, in general may be difficult to properly specify. How does performance depend on how well the reward function is shaped?" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a novel plug-and-play framework for controllable generation with discrete masked models." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024plugandplay,\ntitle={Plug-and-Play Controllable Generation for Discrete Masked Models},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4hFT4rfG40},\nnote={under review}\n}" }, "abstract": { "value": "This article makes discrete masked models for the generative modeling of discrete data controllable. The goal is to generate samples of a discrete random variable that adheres to a posterior distribution, satisfies specific constraints, or optimizes a reward function. This methodological development enables broad applications across downstream tasks such as class-specific image generation and protein design. Existing approaches for controllable generation of masked models typically rely on task-specific fine-tuning or additional modifications, which can be inefficient and resource-intensive. To overcome these limitations, we propose a novel plug-and-play framework based on importance sampling that bypasses the need for training a conditional score. 
Our framework is agnostic to the choice of control criteria, requires no gradient information, and is well-suited for tasks such as posterior sampling, Bayesian inverse problems, and constrained generation. We demonstrate the effectiveness of our approach through extensive experiments, showcasing its versatility across multiple domains, including protein design." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Discrete Masked Models", "Controllable Generation", "Plug-and-play" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/770128330ce169d9139aeae1d6070f0215f6f095.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Plug-and-Play Controllable Generation for Discrete Masked Models" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
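The reviews in the record above repeatedly discuss the core mechanism of this submission: drawing approximate samples from the reward-tilted distribution q(x) ∝ r(x)p(x) by combining a mean-field proposal over masked positions with importance resampling (SIR). The sketch below is a minimal, self-contained illustration of that idea on a toy token sequence. The stand-in `mean_field_logits` model, the toy reward, the particle count, and the one-position-per-step unmasking schedule are all assumptions introduced for illustration; this is not the submission's actual implementation.

```python
import numpy as np

rng = np.random.default_rng(0)

SEQ_LEN, VOCAB = 8, 5      # toy sizes, chosen only for illustration
N_PARTICLES = 256          # Monte Carlo proposals drawn per unmasking step


def mean_field_logits(x, mask):
    """Stand-in for the pre-trained masked model's per-position logits.

    A real masked generative model would condition on the unmasked tokens;
    random logits are returned here only so the sketch runs end to end.
    """
    return rng.normal(size=(SEQ_LEN, VOCAB))


def reward(x):
    """Toy reward: prefer sequences whose mean token id is close to 2."""
    return np.exp(-(x.mean() - 2.0) ** 2)


def sir_unmask_step(x, mask):
    """Propose mean-field fillings for masked positions, resample by reward."""
    logits = mean_field_logits(x, mask)
    probs = np.exp(logits - logits.max(axis=-1, keepdims=True))
    probs /= probs.sum(axis=-1, keepdims=True)

    # Independent (mean-field) proposals for every masked position.
    particles = np.tile(x, (N_PARTICLES, 1))
    for i in np.flatnonzero(mask):
        particles[:, i] = rng.choice(VOCAB, size=N_PARTICLES, p=probs[i])

    # Self-normalized importance weights; the sketch treats the mean-field
    # proposal as if it were the model itself, so the weights reduce to r(x).
    w = np.array([reward(p) for p in particles])
    w /= w.sum()
    chosen = particles[rng.choice(N_PARTICLES, p=w)]

    # Reveal one masked position from the selected particle; remask the rest.
    i = rng.choice(np.flatnonzero(mask))
    x[i], mask[i] = chosen[i], False
    return x, mask


x = np.zeros(SEQ_LEN, dtype=int)
mask = np.ones(SEQ_LEN, dtype=bool)
while mask.any():
    x, mask = sir_unmask_step(x, mask)
print("sample:", x, "reward:", round(float(reward(x)), 3))
```

In a real setting the proposal would come from the masked model's per-position marginals, and the importance weights would also correct for any mismatch between the factorized proposal and the true conditional; the reviewers' concerns about the number of Monte Carlo samples map directly onto `N_PARTICLES` here.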
4hPwLg7zD3
Fourier Head: Helping Large Language Models Learn Complex Probability Distributions
main
Active
LLM;Fourier;smooth function;multi-class classification
foundation or frontier models, including LLMs
5;5;5;6
4;4;3;3
3;2;2;3
3;2;2;2
3;3;2;3
5.25
3.5
2.5
2.25
2.75
-0.57735
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- Can you provide more details on the Decision Transformer and Chronos experiments? How did you choose the size of the models, and how long to train?\n- Can you show how the benefit of using the Fourier head varies as the model size or amount of training data increases? That is, does the benefit of the Fourier head persist at scale or does it vanish?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The proposed Fourier Head layer is well-motivated in domains where classes have a continuous structure\n- The method is straightforward and clearly explained\n- Visualizations clearly demonstrate the advantage of the Fourier head on toy problems in learing continuous densities\n- Experiments in RL and time-series show the Fourier head can improve performance in non-toy settings" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes a Fourier Head based on the Fourier series as a replacement for the usual linear classification head to induce continuous densities across the class IDs. It presents a theoretical analysis of the expressiveness and smoothness trade-off as the number of frequencies increases and empirically shows the advantage of the Fourier Head over the conventional linear head in tasks with output classes with a continuous structure." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- As the paper is focused on improving LLM's ability to model numerical values, there are important related works that explore alternative ways of extracting continuous probabilistic predictions from LLMs over numerical data [1, 2], which are worth discussing. These methods use a hierarchical representation of the numerical values, encouraging nearby values to have similar densities, as they do not correspond to independently predicted classes. These methods therefore do not have the limitations of \"not consider any continuous structure that resides among the tokens\", which the Fourier head claims to address.\n- Similar to methods based on classification over binned values, the Fourier head can only represent a finite range of values. Methods like [1, 2] in principle do not have this issue.\n- The advantage of using the Fourier head seems most significant with small models trained on limited data. At a large scale, the model should be able to learn the continuous structure in the output classes, diminishing the benefit of using the Fourier head. It would be useful to show how the benefit of replacing the linear head with the Fourier scale with training data and model size, such as for Chronos models of different sizes.\n\n[1] Gruver et al. 2023. 
Large Language Models Are Zero-Shot Time Series Forecasters\n\n[2] Requeima et al. 2024. LLM Processes: Numerical Predictive Distributions Conditioned on Natural Language" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "None" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "I am unclear about the organization of the paper, such as why the related work is placed in the latter half." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The Fourier head allows LLMs to better capture continuous structures in non-linguistic tokens, addressing the limitation in traditional models that use softmax over discrete bins. The authors provide both theoretical justifications and empirical analysis." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a novel Fourier head for large language models (LLMs), designed to improve the modeling of continuous structures in non-linguistic tokens, such as decision-making sequences in games and time series forecasting." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The author posits that the Fourier Head can endow the model with a continuity prior, which can be described as semantic adjacency. However, since LLMs inherently incorporate attention mechanisms that aggregate tokens with higher similarities, the contribution of the Fourier Head seems incremental.\n\n2. Regarding the time series prediction section, the author has employed the T5 architecture, yet the baseline comprises only this architecture, which is overly simplistic. There is a significant body of work on time series LLMs currently, with most eventually employing a linear layer (which could also be replaced with a Fourier head), such as TimeLLM and GPT4TS [1,2]. I believe the author needs to further supplement the experiments.\n\n3. Additionally, I think the effectiveness of the Fourier Head may stem from its ability to analyze input frequency and amplitude through Fourier series. The author should consider comparing methods that are based on decoupling [3].\n\n[1] Jin M, Wang S, Ma L, et al. Time-llm: Time series forecasting by reprogramming large language models[J]. arXiv preprint arXiv:2310.01728, 2023.\n\n[2] Zhou T, Niu P, Sun L, et al. One fits all: Power general time series analysis by pretrained lm[J]. Advances in neural information processing systems, 2023, 36: 43322-43355.\n\n[3] Cao D, Jia F, Arik S O, et al. TEMPO: Prompt-based Generative Pre-trained Transformer for Time Series Forecasting[C]//The Twelfth International Conference on Learning Representations." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "I included a few questions in my \"Weaknesses\" response. I've also included a few below:\n\n1. How were the frequencies used in the time series experiments chosen? Were they chosen a priori or through a cross-validation procedure? If cross-validation, how were the splits constructed?\n\n2. Why not explore other bases besides the Fourier basis? Is there something intrinsically better about that basis? Alternatively, there are many other parameterizations that would encourage smoothness. For example, one could parameterize only the differences between buckets and regularize these differences to be small. The final probability mass function would be calculated by integrating the differences. Is there a reason to believe a priori that this approach might perform worse?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The proposed method is simple and outlined with clarity in the paper. While the method is not complex, it is relatively novel to my knowledge. The significance is also reasonably large because modeling continuous numerical values using discrete tokens is increasingly popular." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors argue that current methods for parameterizing a discrete distribution over numerical data suffer from ignoring ordinal structure, which should imply that adjacent discrete buckets will have similar density and therefore \"smoothness\" in the probability mass function. To fix this oversight, the authors propose a new parameterization on the coefficients of a Fourier series, leading to a smooth function on the interval [-1,1], which is then quantized. The new parameterization is therefore a drop-in alternative to a uniform bucketing of the interval. The method is evaluated on toy univariate densities as well as on an offline reinforcement learning problem and in time-series forecasting, and the result indicate that using Fourier head leads to lower errors in density estimation and higher returns in reinforcement learning." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "To my understanding, the main goal of the paper is to propose a practical method, and given this goal, the empirical evaluation is not very impressive. I'll break this criticism down into a few subcategories\n\n1. Emphasis on smoothness: The authors devote a lot of space and attention to the notion of \"smoothness\", proposing a new metric to measure it and including this metric in all the evaluations. However, from a practical standpoint it's not clear why we should care about smoothness independent of its effect on metrics like MAE or RMSE. 
In fact, it's possible to contrive examples where we want less smoothness (related to the square wave examples in the appendix), and it's not clear a priori that the marginal distributions for a particular downstream application will be \"smooth\". The \"smoothness\" numbers therefore feel like a distraction from what really matters, which is whether this ordinal inductive bias actually helps the model fit the data distribution. In many cases, the method seems to improve smoothness without affecting reward/loss or vice versa. \n\n2. Limited empirical impact: while Fourier head does seem to yield significant benefits in offline RL, it doesn't seem to have a significant effect on time series modeling. The benefit in terms of MASE and especially in terms of WQL is very marginal, and if I were looking for ways to improve my model, I might not adopt the additional complexity needed for such a small improvement, which is probably on par with tuning other hyperparameters or making small architectural changes. It might be helpful to identify possible explanations for why the effect is relatively minor in time series but more pronounced in offline RL. For example, are the next-action distributions significantly different in their multimodality? It might be much more compelling to replace the time series experiments with additional offline RL experiments if that application happens to be the ideal use case for this method. \n\n3. Limited baselines: Fourier head is only compared to the most naive possible baseline, uniform binning on [-1, 1]. In practice, there are more widely-used alternatives, such as Gaussian mixture models (GMMs) and quantile regression. Both of these techniques have an ordinal bias and should learn solutions that are much more smooth. I don't know if these methods are viewed as out-of-scope in this paper because they are not learned with cross-entropy loss. From one perspective, it might be reasonable to limit the investigation to discrete tokenization methods and discrete loss functions, but it does make the practical impact lower, as it's hard to tell whether this method is actually the best among all simple options or just an improvement upon simple uniform binning. This particular subcategory of criticism feels especially pertinent given the toy experiments in Section 4, where Fourier head is shown to approximate a GMM. It seems reasonable to conclude that in many cases a GMM should also therefore be able to approximate Fourier head. Is the converse not true and how important are the case where GMMs might not be able to match the performance of Fourier head?\n\nBeyond empirical evaluation I think there are also other potential weaknesses:\n\n1. Limited expressiveness: Presumably this method only works for bounded sequences. In the case of RL this might be reasonable if the state and action spaces are constrained. In the case of time series, this limits applications to series without a significant trend component, which would eventually cause the values to exit the range of past observations. \n\n2. Additional hyper-parameters in the form of chosen Fourier series frequencies and regularization strength." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- The justification of the Fourier regularization term as imposing that the coefficients decay as $1/n^2$ is a little strange to me -- this is an asymptotic condition and, in practice, there are a finite number of coefficients, so isn't the condition always vacuously met?\n- For the Decision Transformer, the output space as described in the paper is more naturally a quantization of $S^1 \\sqcup S^1\\sqcup \\\\{0,1\\\\}$ instead of $[-1, 1]$. (Either a shooting direction or a moving direction, each of which takes eight different values arranged on the circle $S^1$. Also two actions without an associated direction.) It would be interesting to see if the Fourier head can be generalized to output spaces that are not naturally interval-shaped.\n- Actually, if I remember correctly, functions can only be approximated by Fourier series if they are periodic, i.e. functions on $S^1$. I suppose this does not affect the toy example and the time-series modelling, since the interval is chosen to be large enough that the distribution is near zero at the boundaries and so is approximately periodic. But I wonder if this is a limitation in other settings.\n- Often, for tasks with continuous-valued target output (e.g. the toy example and time-series example), only a point estimate is necessary, not the full distribution. Hence a good baseline to include for the toy example is an MLP model with only one output dimension (possibly with atan nonlinearity to map outputs to the interval), evaluated on MSE. Likewise for the time-series example, but with MASE.\n\nMinor typos:\n- Line 193: \"hyperparamter\" -> \"hyperparameter\"\n- Line 205: $c_n$ should be $c_k$ \n- Line 244: \"and $D$ be some measure of discrepancy such that $L^2$,\" should be \"such as\"?\n- Line 257: \"Denote by $g_\\sigma(x)$ is\" delete \"is\"\n- Line 515: \"descretized\" -> \"discretized\"" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper provides evidence that the Fourier head is an improvement over the baseline linear head in a wide variety of settings (toy example, agentic decision making, time-series modeling).\n\t- The Fourier head improves both smoothness and accuracy (MLE, MASE, WQL).\n- The exposition of the Fourier head is clear and easy to understand.\n- Various practical details (training objective, hyperparameter choice, regularization, binning strategy) are provided. This is helpful for reproducibility and to those who wish to apply the Fourier head to other tasks." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes the \"Fourier head\" as an alternative to linear classification heads for tasks where the output can be thought of as a quantization of a continuous space. The Fourier head learns the Fourier coefficients of the target function and then quantizes it instead of learning the quantized values directly. The authors theoretically and empirically demonstrate that varying the number of frequencies in the Fourier head trades off between smoothness and modelling performance. The Fourier head is shown to improve over the baseline linear head on a toy task, an agentic decision-making task, and for time-series modelling." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- For regression tasks with continuous-valued target output, it is not clear to me the practical motivation for outputting an entire probability distribution, instead of just a point estimate. Thus one of the main advantages of the Fourier head, that its outputs are more smooth, feels somewhat unmotivated to me. I would like to see more discussion of why exactly the smoothness is beneficial in practice." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Using Fourier series, we build a neural network layer which learns categorical distributions that have a continuous structure." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024fourier,\ntitle={Fourier Head: Helping Large Language Models Learn Complex Probability Distributions},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4hPwLg7zD3},\nnote={under review}\n}" }, "abstract": { "value": "As the quality of large language models has improved, there has been increased interest in using them to model non-linguistic tokens. For example, the Decision Transformer recasts agentic decision making as a sequence modeling problem, using a decoder-only LLM to model the distribution over the discrete action space for an Atari agent. However, when adapting LLMs to non-linguistic domains, it remains unclear if softmax over discrete bins captures the continuous structure of the tokens and the potentially complex distributions needed for high quality token generation. We introduce a neural network layer, constructed using Fourier series, which we can easily substitute for any linear layer if we want the outputs to have a more continuous structure. We perform extensive analysis on synthetic datasets, as well as on large-scale decision making and time series forecasting tasks. We also provide theoretical evidence that this layer can better learn signal from data while ignoring high-frequency noise. All of our results support the effectiveness of our proposed Fourier head in scenarios where the underlying data distribution has a natural continuous structure. For example, the Fourier head improves a Decision Transformer agent's returns by 46\\% on the Atari Seaquest game, and increases a state-of-the-art times series foundation model's forecasting performance by 3.5\\% across 20 benchmarks unseen during training." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "LLM", "Fourier", "smooth function", "multi-class classification" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/1a312ad458147b42adc74373b834f17e5ff69eed.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/5148b34548002960784124310cbf30f4775c21c0.zip" }, "title": { "value": "Fourier Head: Helping Large Language Models Learn Complex Probability Distributions" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4hdDPa9bpI
Graph Fourier Neural Kernels (G-FuNK): Learning Solutions of Nonlinear Diffusive Parametric PDEs on Multiple Domains
main
Active
Neural Operator;Graph Neural Networks;Graph Fourier Transform;Partial Differential Equations;Operator Learning;Cardiac Electrophysiology
applications to physical sciences (physics, chemistry, biology, etc.)
3;3;5;8
3;3;3;4
2;3;2;3
2;3;2;3
2;3;2;3
4.75
3.25
2.5
2.5
2.5
0.916949
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "**Major:**\n\n- Is there an error estimator in the prediction? How can one trust the outcome of fitted G-FUNK model for an unseen problem?\n- Is there a way to enforce conservation laws (or some notion of structure preservation) in prediction if the underlying PDE admits such constraint?\n- Does the solution/data need to be smooth? Can you try out viscous burger's equation with emerging discontinuity as viscosity goes to zero?\n- Is the outcome ODE stable? Is there a guarantee on stability of ODE?\n- Can you compare the Data-Generation/Training/Prediction time of proposed method versus the ones from a standard finite element/volume solver in the presented test cases? I believe comparing only the time/complexity of prediction against standard solver is very much misleading.\n- If we know the underlying PDE, wouldn't it make sense to incorporate that in the loss, similar to what PINN does?\n- Can the author show a case of extrapolation? To me, similar to PINNs, the proposed method can only be used as an efficient interpolator within the space of training data. This makes me doubt the claims on \"parameter and domain-adaptation\". For example, if you train your model to estimate heat equation in 1d problem inside the domain [0,1], can you test it on domain [-10,10] with different boundary conditions?\n\n**Minor:**\n\n- Abstract: “… for which the highest-order term in the PDE is diffusive...” What does it mean? Do you mean the highest-order term is even or, it has to be 2? I’m guessing second-order PDEs, which needs to be clarified in Abstract.\n- Abstract: \"without the need for retraining\" and not \"without need for retraining”." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper proposes merging graphs within the standard surrogate models, which allows estimating the solution of 2nd order PDEs with varying coefficients on complicated geometries." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors proposes an interesting surrogate model that combines Graph as the discretization method for Fourier neural operators. I believe the manuscript may be accepted after a major revision." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I believe there are several important details missing in the manuscript. Below, I list them." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "NA" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The numerical demonstration is the cardiac EP example is interesting since I understood the least. I will rely on an expert in this area to give a meaningful comment on this example." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a neural network model to learn a solution operator of time-dependent second-order semi-linear PDEs that takes diffusion tensor and random sample points of a family of domains as inputs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "This paper proposes a neural network model to learn a solution operator of time-dependent second-order semi-linear PDEs that takes diffusion tensor and random sample points of a family of domains as inputs.\nWhile the first input has been considered by many authors, the difficulty here is to allow the domain to be chosen from a family of domains, denoted by $\\\\{\\Omega_\\alpha\\\\}_{\\alpha\\in \\mathcal{A}}$. My first thought is whether the setup makes sense since there is no discussion on the class of domains that are imposed. I believe this won't work on arbitrary classes of domains. For example, e.g., for Riemannian manifolds, I would believe that if any pair of manifolds in this class have Riemannian metrics that are diffeomorphic (or even stronger, such as conformally equivalent), then the learning problem makes sense. One would need a notion of continuity between any pair of domains in the class, otherwise, it is not feasible to interpolate (to have a map that can interpolate between the training domains). In the numerical examples shown in the paper, there is an affine transformation between any pair of arbitrary side lengths in the 2D Nonlinear Reaction-Diffusion. For the Cardiac Electrophysiology, although the measured data come from 25 patients, the PDE is solved on processed domains (finally 24 of these), and I suspect that these domains are diffeomorphic.\n\nThe only interesting numerical demonstration is the cardiac EP example since I understood the least. I will rely on an expert in this area to give a meaningful comment on this example. In terms of methodology, I cannot understand why the Graph Laplacian structure is helpful, unless when the derivatives in Eq. (2) are defined with respect to the Riemannian metric of the embedded manifolds. It is also not obvious to me why the construction of the G-FUNK layer should be a way to go since I cannot reason it from any basic principle. \n\nWhile some numerical results look interesting, I don't really understand why the approach should work in general due to the lack of theoretical justification. 
I am also not sure how the approach behaves if one increases the number of layers or parameters in each G-FUNK layer. Finally, the three numerical examples are low-dimensional problems (2D or 3D), so I would naively believe the standard PDE solvers should be able to solve the problem accurately in a reasonable time. Solutions to these (FEM in the cardiac EP example) are being used to train the G-FUNK model. Based on these concerns, I believe this paper is technically (or mathematically) not interesting and not suitable for publication in ICLR. I would urge the authors to consider submitting this work to a domain science journal that is relevant to personalized cardiac EP." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Why do the authors use the neural ODE model? Any gain from the specific model?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. G-FuNK leverages a parameter- and domain-adapted spectral method such that it is well-suited for problems involving anisotropic diffusion and complex geometries.\n\n2. The application to cardiac electrophysiology is very interesting.\n\n3. The paper presents a detailed comparison with FNO and GNN methods, showing that G-FuNK can outperform these methods." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes Graph Fourier Neural Kernels (G-FuNK), which aim to solve time-dependent, nonlinear partial differential equations (PDEs) with varying parameters and domains. G-FuNK leverages a parameter- and domain-adapted spectral method. These operators are particularly well-suited for problems involving anisotropic diffusion and complex geometries. The paper demonstrates G-FuNK's effectiveness on several applications, such as heat equation simulations, reaction diffusion equations, and cardiac electrophysiology, showing promising results in terms of accuracy and computational speed." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The limitations should be addressed.\n\n2. The so-called graph Fourier transform is actually a spectral method, which needs the eigenvectors to be pre-computed first. This procedure could make the method impractical for large-scale problems. Computational efficiency and scalability should be reported, including the offline computation of the eigenvectors and the online computation time. Comparison with FNO and GNN in terms of efficiency is also absent.\n\n3. The novelty of the graph Fourier transform is limited. This is a widely studied area in the graph neural network community." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Given that the paper focuses on PDEs on graphs, it seems that the \"Multipole Graph Neural Operator for Parametric Partial Differential Equations\" (MGKN), which claims to be mesh-invariant, would be a more suitable baseline for comparison. While the original MGKN framework does not explicitly tackle changing geometry, it seems it can still be applied in this setting, since only the graph is used as an input. There are also many similarities between these two approaches: the Fourier transform and inverse Fourier transform in G-FuNK play a role similar to kernel convolutions in MGKN, which are computed using the multipole algorithm. Additionally, both methods employ some form of truncation to make computation more tractable (e.g., limiting modes or long-range interactions). I would appreciate further discussion or comparison of these two approaches." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The writing is clear, and the combination of graph neural networks (GNNs) and FNO is well motivated and novel to the best of my knowledge. The numerical examples show a gradual increase in complexity, leading to the final example on cardiac electrophysiology, which is complex and demonstrates the practical utility of the proposed framework." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes Graph Fourier Neural Kernels (G-FuNK) for learning solution generators of time-dependent partial differential equations (PDEs) on graphs. G-FuNK aims to be \"geometry-invariant\" by leveraging the spectral domain of graphs through the Graph Fourier Transform (GFT), similar to how the Fourier Neural Operator (FNO) achieve \"discretization-invariance\" in regular domain." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "In Table 1, for the three methods (G-FuNK, FNO, GNN), the number of parameters can differ by orders of magnitude, making it challenging to evaluate the improvement in performance. \n\nThe statement \"Our method predicts entire trajectories in under 1 second, significantly outperforming traditional numerical methods. For example, cardiac EP simulations typically take at least 15 minutes on 12 CPU cores for one given set of initial conditions.\" could be more precise, particularly regarding the numerical solution being compared. It seems the cardiac EP simulations from numerical methods serve as the high-fidelity solutions that are used to generate the training data and evaluate the error. However, G-FuNK learns on reduced modes (k_max), which may lead to limited accuracy. 
It would be more informative if the numerical solutions are computed on coarser mesh that achieve similar of accuracy as G-FuNK, or using the reduced modes." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Learning temporal dynamics of diffusive PDEs on multiple anisotropic domains using neural operators that embed Graph Fourier Transforms." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024graph,\ntitle={Graph Fourier Neural Kernels (G-Fu{NK}): Learning Solutions of Nonlinear Diffusive Parametric {PDE}s on Multiple Domains},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4hdDPa9bpI},\nnote={under review}\n}" }, "abstract": { "value": "Understanding and predicting the time-dependent dynamics of complex systems governed by non-linear partial differential equations (PDEs), with varying parameters and domains, is a difficult problem that is motivated by applications in many fields. We introduce a novel family of neural operators based on a Graph Fourier Neural Kernel (G-FuNK), for learning solution generators of nonlinear PDEs with varying coefficients, across multiple domains, for which the highest-order term in the PDE is diffusive. G-FuNKs are constructed by combining components that are parameter- and domain-adapted, with others that are not. The latter components are learned from training data, using a variation of Fourier Neural Operators, and are transferred directly across parameters and domains. The former, parameter- and domain-adapted components are constructed as soon as a parameter and a domain on which the PDE needs to be solved are given. They are obtained by constructing a weighted graph on the (discretized) domain, with weights chosen so that the Laplacian on that weighted graph approximates the highest order, diffusive term in the generator of the PDE, which is parameter- and domain-specific, and satisfies the boundary conditions. This approach proves to be a natural way to embed geometric and directionally-dependent information about the domains, allowing for improved generalization to new test domains without need for retraining. Finally, we equip G-FuNK with an integrated ordinary differential equation (ODE) solver to enable the temporal evolution of the system's state. Our experiments demonstrate G-FuNK's ability to accurately approximate heat, reaction diffusion, and cardiac electrophysiology equations on multiple geometries and varying anisotropic diffusivity fields. We achieve low relative errors on unseen domains and fiber fields, significantly speeding up prediction capabilities compared to traditional finite-element solvers." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Neural Operator", "Graph Neural Networks", "Graph Fourier Transform", "Partial Differential Equations", "Operator Learning", "Cardiac Electrophysiology" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/6a6f2501d3a63350a719d0b1fd2daacd46b085ef.pdf" }, "presentation": null, "primary_area": { "value": "applications to physical sciences (physics, chemistry, biology, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/cb43aaf8ab29860735ecce83d987c8e7d62cf9fe.zip" }, "title": { "value": "Graph Fourier Neural Kernels (G-FuNK): Learning Solutions of Nonlinear Diffusive Parametric PDEs on Multiple Domains" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4hp2bVdaHU
Data-Aware Training Quality Monitoring and Certification for Reliable Deep Learning
main
Active
Deep learning;data-driven bounds;training process;training quality monitoring;safe AI;reliable AI training;regulatable AI;performance certification
optimization
3;3;3;5
3;4;2;2
2;1;1;2
2;1;2;2
2;2;2;2
3.5
2.75
1.5
1.75
2
-0.522233
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- Can you clarify how the YES training bounds directly contribute to improvements in model robustness and safety?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The proposed system's clarity is enhanced by the color-coded cloud-based monitoring system, which makes it intuitive for practitioners to interpret training status visually." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces YES training bounds, a framework for real-time, data-aware certification and monitoring of neural network training. The framework evaluates data utilization efficiency and optimization dynamics, providing insights into training progress and detecting suboptimal behavior. The paper validates the YES bounds using synthetic and real data experiments, offering a tool for certifying training quality and guiding performance enhancements." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "This paper has over-claimed its applicability, especially in model robustness and safety. None of the experiments discuss model safety and robustness. Accepting this paper unchallenged may send the wrong signal that the proposed method for enhancing model safety or robustness has been vetted, which it has not due to the omissions of related experiments." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Q1: Could the authors please clarify L090-L092: \"They do not produce varying certification results across different training realizations, even when initialized identically or following similar optimization paths.\"\n\nQ2: Why would randomness (L088-L095) be such an issue?\n\nQ3: $\\mathbf{Y}_{k}$ is never defined. What does it denote?\n\nQ4: Could the authors clarify L245-L253. How does one obtain the YES-SIGMA bound precisely?\n\nQ6: How will the YES cloud help regulators?\n\nQ7: Is there a formalism to justify why projecting from each layer to $Y$ is reasonable?" 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "S1: Convergence diagnostics is a worthwhile problem to study. The criticism of local, curvature based convergence diagnostics is justified and hence, providing a empriical, yet strategic \"benchmark\" framework is an interesting approach.\n\nS2: The proposed method is simple and readily accessible, even to a non-specialist audience, that is likely to benefit most from \"training support\"." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a heuristic for convergence diagnostics in multi-layer perceptrons that is data-aware. The authors propose to use solve a OLS-like linear problem for each layer to determine what a reasonable, but suboptimal weight matrix for each layer is. They then propose a traffic light system that compares training loss to this heuristic." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "**Motivation**\n\nW1: I am sceptical of the utility of a \"standardization of training practices in deep learning\" (L078). The author's more detailed account that \"The proposed YES [is] a promising pathway toward establishing a benchmark for the AI industry, regulators, and users alike\" (L070) is vague and does not provide concrete details on _how_ this is valuable. For instance, the authors could provide examples or consult recent legislation on this issue.\n\nW2: Prior work is not properly cited. In fact, only a single paper (Oymak et al.) on neural network optimisation / convergence diagnostics is cited.\n\n**Presentation**\n\nW3: The Appendix is missing.\n\nW4: The language and notation for the main results is not always clear. Examples:\n\n- Eq 3 uses $\\mathbf{Y}_k$ without ever defining it before. I suggest the authors provide a clear definition first.\n- First paragraph in Section 4.1 is not clear (L183 - 189). Same for L245-L253.\n- The paper describes a theoretical optimal model, the actual model, and the \"bounding\" model obtained by setting weights through the pseudoinverse. It is not always clear which of these models weight matrices or activations belong to.\n- It is not clear what \"intermediate states\", \"intermediate mappings\", \"intermediate points\" are.\n\nW5: The authors are inconsistent in their claims and tend to overstating their contributions. While the authors state at times that their method is a \"sanity check\", they later state:\n\n- \"[...] can attest that the training is not proper.\" (L205)\n- \"The answer (YES or NO) will provide immediate relief as to whether training has been meaningful at all\" (L211)\n- \"The reason is simple: heuristics outperform random, and optimal beats heuristic\" (L521) or L534 to L536.\n- \"These bounds aim to provide a qualified answer to the question as to whether a neural network is being properly trained by the data: YES or NO?\" (L179).\n- \"cloud unequivocally indicate suboptimality\" (L085). 
This is a very strong statement, for which there is no supporting evidence.\nA more precise account of the contributions and limitations would be appropriate as well as fewer absolute statements without justification.\n\nW6: The proposed method is a _heuristic_ not a certificate, which typically describes provable statements that can be asserted about a model.\n\nW7: The authors are overstating the impact to the safety of models. This work predominantly cites literature on ML safety, but does not set out a clear path how their work impacts safety, how it can establish trust and how it will help regulators. The statement \"This standardization could play a crucial role in fostering trust and accountability within the AI ecosystem.\" lacks proper justification.\n\n**Method**\n\nW8: Using a linear model as baseline during training as bechnmark is common practice (when appropriate). The main insight seems to be the prediction of the target from each intermediate layer. I disagree with the authors statement: \"A sensible but sub-optimal approach\" (L185) and do not see sufficient justification for this statement. The authors state: \"it has also been observed in various machine learning problems that after extensive training (resembling what we can describe as optimal training), the output of some inner layers become something meaningful to domain experts\" (L255-258), for which they do not provide sources.\n\nW9: layerwise OLS solutions are a very basic heuristic that do not mark significant contributions to the ML community. One can trivially, see that this bound becomes vacuous, even for single layer models when replacing ReLU with sigmoid (i.e. regard data-generating model $\\sigma(AX+e)$ where $A$ has large values.)\n\nW10: At times, the authors do not provide sufficient proof or citation when making non-trivial statements.\n\n- \"Given a judicious selection of ... the latter should provide a tighter error bound compared to the YES-0 bounding approach\" (L252). What is a judicious selection? Such claim should be supported by a theorem or a more detailed analysis.\n- L255-258 as cited above.\n\nW11: The authors solely focus on the train loss, when in practice the test loss is most relevant in learning problems.\n\n**Experiments**\n\nW12: The paper describes experiments on 2 small toy data. This is not enough to extrapolate to real impact. There are various regression and classification datasets that seem like suitable tests for this method. In particular, Boston / California Housing Prices, SVHN, CIFAR-10(0) or TinyImageNet. Convergence diagnostics will become more relevant the more complicated the loss landscape and the more non-linear the problem gets. At the same time the proposed bound will become more vacuous in these settings. A detailed discussion of this would be of interest. Larger, more complex, and high-dimensional datasets are important to judge the potential impact of this method.\n\nW13: The authors do not compare their method against diagnostics baselines. For instance, fitting a simple one-layer linear model, or discussing other strategies for convergence diagnostics.\n\nW14: Figure 3 stops before models are converged.\n\n**Minor**\n\nMW1: It is community standard to have pdf links between in-text citation and the bibliography. This would be appreciated. Citations should be in round parantheses when not not part of the sentence's grammatical structure, e.g. 
(Goodfellow et al., 2016).\n\nMW2: Figure 3: should share X and Y axis.\n\nMW3: The lack of algorithm boxes, makes it difficult to follow the exact procedures described." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Please discuss the relevant work in the field of optimization and clarify the novelty of this paper.\n2. Please clarify the contribution and explain the application of YES bounds in the field of model robustness and reliability.\n3. Please explain why the enhanced bounds in Section 4.2.2 can tackle the issues discussed in Section 4.2.1.\n4. Please provide more evaluation on different CV/NLP tasks to highlight the generalization of the proposed bounds." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The discussed topic is interesting, which focuses on monitoring the training quality and progress. With the proposed bounds, users could better control the optimization, which could benefit the community.\n2. By considering the specific structure and properties of training data, the proposed YES bounds could provide tailored and precise evaluations of training performance.\n3. Some experiments on image denoising task demonstrate the effectiveness of the proposed bounds." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces YES training bounds, a framework for real-time, data-aware certification of training, which aims at assessing the quality of model training. Specifically, these bounds evaluate the efficiency of data utilization and optimization dynamics. The depth and non-linear activation functions of models are taken into consideration. Experimental validation on synthetic and real data demonstrates the effectiveness of the bounds in certifying training quality." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. No detailed discussion of relevant works, which makes it difficult to situate this paper. Some discussions of relevant optimization works are missing, such as [a, b].\n2. The contribution of this paper could be overclaimed. YES bounds are introduced to indicate the training quality. Although the authors claim that the proposed training bounds aim at improving the reliability, robustness, and safety of models, it is difficult to see how the YES bound can be utilized for such a purpose.\n3. This paper discusses the scenario of non-direct paths in Section 4.2.1, however, it is difficult to see how the enhanced bounds that involve intermediate points can tackle this issue.\n4. Another concern lies in the evaluation, it is unclear whether the proposed training bounds can be generalized to different CV and NLP applications, such as image generation via diffusion, VQA via LLaVa, etc.\n5. 
It is also unclear how the proposed training bounds can motivate new research works or provide insights into this field.\n\n[a]. Generalization Bounds for Stochastic Gradient Descent via Localized ε-Covers. NeurIPS 2022.\n\n[b]. Closing the convergence gap of SGD without replacement. ICML 2020." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Please address my comments in the Weaknesses section.\n2. The experiments show that sometimes the training gets stuck in loss plateaus while in the yellow region. Is there any action that one might take to avoid or overcome this obstacle, other than waiting for the loss to drop? \n3. It is not clear how Deep Unfolding Networks are relevant in the context of this paper, as they are only briefly mentioned in section 4.2.1, without sufficient explanation, and then never mentioned again. Can you give a more detailed explanation?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The idea of using linear projections as a weak model to compute upper bounds on the training loss is interesting and original. \n\nThe method is well-described and easy to understand. Mathematical proofs are easy to follow.\n\nGetting insights on the training process beyond the local optimization perspective might be a promising and relevant direction for the future of ML optimization." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper explores the challenge of monitoring the quality of training for deep learning models. The paper proposes a method based on upper bounds for the training loss, estimated using linear projections from different layers during training. The authors show that comparing the training loss with the estimated bounds can provide real-time insights into the quality of the training procedure, facilitating the intervention in case of ineffective or sub-optimal training." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* The paper only analyzes small networks with FeedForward architecture and only Linear layers. It is not clear how the procedure could be applied to practical architectures such as ResNet or Transformers. Additionally, there are no concrete details about the architectures used. I assume all hidden layers have the same dimensionality as the input and output, but this should be clearly stated. I suggest the authors extend their experiments to more complex architectures and to include a detailed description of the architectures used (e.g. a table with the number of layers, number of hidden neurons per layer, activation functions).\n* The tasks used for training the networks are not clearly explained. 
It is not clear what is the input and output of the models, what is the loss function and whether the trained models can achieve any significant generalization power. In particular, for the 1D Signal Denoising task, it is unclear how the random signal drawn from $N(0,1)$ could be recovered after applying noise drawn from $N(0,0.2)$. I would like to see more detailed explanations regarding the tasks and the training setup.\n* The practical image recovery experiment in the Appendix does not seem to have real practical applications. My understanding is that the model is specifically trained to recover one image after it has been corrupted, which would require full access to the uncorrupted image during training. If this is correct, then this example has no more practical applicability than the synthetic data tasks. It is also unclear how the network is constructed and what are its inputs and outputs. I suggest adding some more detailed explanations about the training and the model architecture.\n* The paper presents no analysis of the running time of the proposed method. It is unclear how the method will impact the training time of the model under real-time conditions. It would be interesting to see a comparison of training times with and without the YES bounds computation and an analysis of how the computational overhead scales with model size (I would expect the costs of estimating higher order YES bounds to significantly increase for very deep models).\n* The utility of the proposed method concerning train-test generalization is only presented in the Appendix. I consider that this experiment should be presented in the main body of the paper, as ultimately the purpose of training models is generalization to unseen test data. However, it is unclear how well the model is able to generalize in this case: Figure 4 clearly shows a correlation between the evolution of the training and testing losses, but the minimum value of the test loss is similar to the starting value of the training loss, hinting towards very poor generalization capabilities. This is very likely caused by the low correlation between train and test data, so repeating the experiment on real-world data might show more favorable results. Additionally, this experiment does not present any training details (learning rate, batch size, train/test split ratio etc.).\n\n**Minor points**\n* Algorithm 1 should have the model as input along with the training data.\n\n**Formatting**\n* The Appendix should be part of the main pdf, not as a separate document\n* References to Sections, Figures, Tables, Equations and Citations should have hyper-ref links for better navigability. \n* Multiple citations are not well integrated into the text, for example: Line 031 should read “... transformations (LeCun et al.2015, Goodfellow et al. 2016).” which can be achieved by using the \\citep{} command. Line 041 should read “Oymak & Soltanolkotabi (2019) theoretically demonstrate …” which can be achieved by using the \\citet{} command.\n* Line 050 has an unmatched parenthesis after YES.\n* Figures 3,4 should have a clear ordering for the learning rates (instead of 1e-3, 1e-2, 1e-4). Subfigure labels for Figures 3,4,5 would also facilitate understanding." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024dataaware,\ntitle={Data-Aware Training Quality Monitoring and Certification for Reliable Deep Learning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4hp2bVdaHU},\nnote={under review}\n}" }, "abstract": { "value": "Deep learning models excel at capturing complex representations through sequential layers of linear and non-linear transformations, yet their inherent black-box nature and multi-modal training landscape raise critical concerns about reliability, robustness, and safety, particularly in high-stakes applications. To address these challenges, we introduce YES training bounds, a novel framework for real-time, data-aware certification and monitoring of neural network training. The YES bounds evaluate the efficiency of data utilization and optimization dynamics, providing an effective tool for assessing progress and detecting suboptimal behavior during training. Our experiments show that the YES bounds offer insights beyond conventional local optimization perspectives, such as identifying when training losses plateau in suboptimal regions. Validated on both synthetic and real data, including image denoising tasks, the bounds prove effective in certifying training quality and guiding adjustments to enhance model performance. By integrating these bounds into a color-coded cloud-based monitoring system, we offer a powerful tool for real-time evaluation, setting a new standard for training quality assurance in deep learning." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Deep learning", "data-driven bounds", "training process", "training quality monitoring", "safe AI", "reliable AI training", "regulatable AI", "performance certification" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/2d10f4a3c93596f30077b4715290ca57d619845b.pdf" }, "presentation": null, "primary_area": { "value": "optimization" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": { "value": "/attachment/b90686d5828c6644dc515f4207a4716f5fefd48a.pdf" }, "title": { "value": "Data-Aware Training Quality Monitoring and Certification for Reliable Deep Learning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4iFSBgxvIO
Cached Multi-Lora Composition for Multi-Concept Image Generation
main
Active
Low-Rank Adaptation (LoRA);Multi-LoRA composition;Text-to-image models;Computational efficiency
applications to computer vision, audio, language, and other modalities
5;5;5
5;4;5
2;2;2
2;2;2
3;3;3
5
4.666667
2
2
3
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "It's unlikely that this work has more potential to generate harmful images than the previous published work." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Does a multiple LoRA mechanisms ensemble improve the behavior of the generative model in terms of concepts that are under-represented at the data level?\nCan you provide some examples in which the method shows improved semantic consistency? \nWhy are the claims at the end of page 9 and the beginning of page 10 not proven through a visual comparison?\nWhat (or how can be quantified) is the computational effort of the compared methods?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper is generally well written and, (at least for the class of similar papers) rather easy to follow. \nThe claims of the authors, on which the paper writing discourse is based on, are verified through evaluations which can become clear, if correctly exemplified." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors propose an analysis of typical LoRA algorithms when subjected to a caching mechanism. The study is further extended with the proposal of a framework integrating multiple LoRA mechanisms, aiming at reducing concept-related uncertainty, which is expected to show reduced semantic misconceptions. The proposed method is extensively evaluated in terms of CLIPScore and MiniCPM-V testing." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Even if the writing is good, the quality of the visuals (e.g Fig 4, 6) can be improved. \nA lack of visual comparisons is not expected, given the fact that the most of the evaluations showing a certain advantage of the proposed method are either purely subjective or extremely difficult to quantify. \nAt least in terms of quantitative evaluations (in terms of CLIPScore), the introduction of the cache mechanism does not show consistent results, but rather mixed. A systemic improvement/degradation of the performance its difficult to identify or explain, at least for the cache mechanism analysis. \nA total lack of evaluations in terms of computational effort/efficiency." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1.I’m not sure about Figure 1. Do you assume that meaningful amplitude difference happens only at the same time steps for the two Loras? In another word, do you assume different Lora categories are well-aligned along time step? Furthermore, given that the observation in Figure 1 motivates the proposed method, it’s suggested to provide comprehensive analysis to explain the high/low frequency issues of different Loras.\n\n2.The proposed solution in Section 2.2 is presented without deep analysis. Can you please provide a high-level analysis of your solution to explain your method in a progressive way? e.g. eq (2), eq (3) is introduced directly without explain why.\n\n3.The observation is based on Lora categories from Ref1. How does the method perform with respect to different Lora categories? \n\n4.The collective guidance in eq (5) seems related to classifier free guidance, can you please provide further analysis?\n\n5.Benchmark comparison in Table 1 seems marginal performance gain. Please explain further.\n\n6.Please also explain in detail the “semantic conflict” issue as there exists no experiments to verify the existence of this issue (or maybe I failed to find it, please show me where I can find it.)\n\nRef1, Multi-lora composition for image generation, 2024." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1.Frequence domain analysis for multi-component generation is indeed an interesting idea. \n\n2.The proposed solution is easy and clear (although high-level insight is not very obvious.)\n\n3.The experiments are good in explain the effectiveness of the solution." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper works on fixing issues of using Lora for multi-concept image generation. Particularly, this paper empirically find that some LoRAs amplify high-frequency features, and others focus on low- frequency elements. Based on this observation, a frequency domain based sequencing strategy is presented to determine the optimal order in which LoRAs should be integrated during inference, and a training-free framework, namely Cached Multi-LoRA (CMLoRA), is designed to integrate multiple LoRAs while maintaining cohesive image generation. Experiments suggest that CMLoRA outperforms SOTA training-free LoRA fusion methods for multi-concept image generation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.It’s not clear why frequency domain is needed to solve the multi-component generation task. A clear investigation and analysis on how they come up with this solution can further strengthen the contribution of the work. Particularly, more analysis is needed to explain why shift attention from spatial domain to frequency domain. \n\n2.The observation that some LoRAs amplify high-frequency features, and others focus on low- frequency elements is based on a naïve experiment. More analysis or theoretical analysis is needed to better appreciate the proposed idea.\n\n3.The experimental results is good but not convincing to explain the superiority of the solution." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Could you provide examples or further analysis of cases where CMLoRA might struggle with semantic conflicts? For example, are there certain LoRA combinations or types of images where the method performs suboptimally?\n\n2. Could you clarify how you determined the values for the caching hyperparameters $c_1$ and $c_2$? Did you observe any significant performance variations with different values, and if so, could you provide insights on optimal settings?\n\n3. Have you tested CMLoRA on datasets beyond the anime and realistic styles in the ComposLoRA testbed? If not, could you discuss how the method might adapt to other domains?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper introduces a novel Fourier-based approach to address the challenge of multi-LoRA composition by partitioning LoRA modules into high- and low-frequency categories. This frequency-aware sequencing strategy is innovative, as it moves beyond the typical naive integration of LoRAs by leveraging the frequency domain to systematically order their application during inference. This approach effectively mitigates semantic conflicts and represents a creative combination of LoRA adaptation with Fourier-based analysis, contributing a unique perspective to the field of multi-concept image generation.\n\n2. The paper’s methodology is sound and well-supported by rigorous experimentation. The introduction of the Cached Multi-LoRA (CMLoRA) framework is methodically detailed, with clear mathematical formulations and a thorough explanation of the caching mechanism. The empirical evaluations are comprehensive, covering a range of established metrics like CLIPScore and MLLM-based benchmarks, which validate the claims across different aspects of multi-concept image synthesis, including element integration, spatial consistency, and aesthetic quality.\n\n3. The proposed CMLoRA framework addresses a significant limitation in current LoRA-based image generation methods by enabling efficient and high-quality integration of multiple LoRA modules. The training-free nature of CMLoRA increases its practical applicability, making it more accessible for scenarios where training resources are limited or infeasible." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces Cached Multi-LoRA (CMLoRA), a framework for training-free, multi-concept image generation that integrates multiple Low-Rank Adaptation (LoRA) modules in text-to-image diffusion models. By analyzing LoRAs in the Fourier domain, CMLoRA partitions LoRAs into high- and low-frequency sets, applying high-frequency LoRAs in early denoising stages and low-frequency ones later to reduce semantic conflicts. 
A novel caching mechanism selectively activates non-dominant LoRAs, enhancing computational efficiency while maintaining image quality. Evaluated against existing methods, CMLoRA shows superior performance in aesthetic and compositional quality, demonstrating its effectiveness for generating complex, coherent images from multiple concepts." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. What are the failure cases? A couple of visual examples of failed outputs could provide more insights into the limitations of the CMLoRA method. \n\n2. How were the caching hyperparameters $c_1$ and $c_2$ chosen, and how sensitive is the model’s performance to their variations? Furthermore, there is limited discussion of how the caching interval impacts the final performance in terms of both computational efficiency and image quality. Additional experiments that explore the impact of varying these parameters would make the paper’s claims around caching strategy more robust and actionable for readers interested in applying or extending CMLoRA.\n\n3. What is the exact impact of the frequency-based LoRA partitioning, and would alternative sequencing strategies be effective?\n\n4. The paper’s evaluations focus primarily on a limited set of datasets (anime and realistic styles within the ComposLoRA testbed) and may not generalize to broader multi-concept applications. Furthermore, CLIPScore and the other metrics used may not fully capture nuances in compositional fidelity, particularly as the number of LoRAs increases. Expanding the scope of datasets and incorporating additional image quality metrics, such as perceptual quality or domain-specific measures, would strengthen the applicability of CMLoRA across a wider range of practical scenarios." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024cached,\ntitle={Cached Multi-Lora Composition for Multi-Concept Image Generation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4iFSBgxvIO},\nnote={under review}\n}" }, "abstract": { "value": "Low-Rank Adaptation (LoRA) has emerged as a widely adopted technique in text-to-image models, enabling precise rendering of multiple distinct elements, such as characters and styles, in multi-concept image generation. However, current approaches face significant challenges when composing these LoRAs for multi-concept image generation, particularly as the number of LoRAs increases, resulting in diminished generated image quality. \nIn this paper, we initially investigate the role of LoRAs in the denoising process through the lens of the Fourier frequency domain.\nBased on the hypothesis that applying multiple LoRAs could lead to \"semantic conflicts\", we have conducted empirical experiments and find that certain LoRAs amplify high-frequency features such as edges and textures, whereas others mainly focus on low-frequency elements, including the overall structure and smooth color gradients.\nBuilding on these insights, we devise a frequency domain based sequencing strategy to determine the optimal order in which LoRAs should be integrated during inference. 
This strategy offers a methodical and generalizable solution compared to the naive integration commonly found in existing LoRA fusion techniques.\nTo fully leverage our proposed LoRA order sequence determination method in multi-LoRA composition tasks, we introduce a novel, training-free framework, Cached Multi-LoRA (CMLoRA), designed to efficiently integrate multiple LoRAs while maintaining cohesive image generation.\nWith its flexible backbone for multi-LoRA fusion and a non-uniform caching strategy tailored to individual LoRAs, CMLoRA has the potential to reduce semantic conflicts in LoRA composition and improve computational efficiency.\nOur experimental evaluations demonstrate that CMLoRA outperforms state-of-the-art training-free LoRA fusion methods by a significant margin -- it achieves an average improvement of $2.19\%$ in CLIPScore and $11.25\%$ in MLLM win rate compared to LoRAhub, LoRA Composite, and LoRA Switch." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Low-Rank Adaptation (LoRA)", "Multi-LoRA composition", "Text-to-image models", "Computational efficiency" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/fc7b12ee5e7ea5b8363ff04829acdd265eaa4184.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Cached Multi-Lora Composition for Multi-Concept Image Generation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4ihkxIeTFH
FAdam: Adam is a natural gradient optimizer using diagonal empirical Fisher information
main
Active
Optimizer;Adam;Natural gradient descent;Second order optimization;Information geometry;Riemannian geometry;Differential geometry;Tensor calculus;Deep learning;Fisher Information;Hessian;Curvature
optimization
3;3;3;3;3
3;2;4;4;5
2;2;1;2;3
1;2;2;2;2
1;3;2;2;3
3
3.6
2
1.8
2.2
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See weaknesses section." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. Understanding Adam is an important problem. Using fisher information and natural gradient descent to understand Adam is novel.\n\n2. The presentation of this paper is good in general." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the connection between Adam optimizer and natural gradient descent by leveraging techniques from Riemannian geometry. Based on this, the authors propose a modified algorithm named Fisher Adam (FAdam). The convergence analysis of FAdam is provided and the algorithm is tested by large language model (LLM) experiments." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The theoretical analysis of FAdam is weak. It directly follows the paper (Defossez et al. 2020) and requires strong assumptions (e.g., $\\beta_1$=0, bounded gradient). So it does not analyze the algorithm's momentum and is worse than the state-of-the-art analysis of Adam in the literature. \n\n2. I do not find any rigorous presentation of Adam's flaws in the paper as claimed in the abstract by the authors. For example, the paper does not have any clear negative results of the vanilla Adam experimentally or theoretically. Therefore, the motivation of designing a new variant of Adam such as FAdam is unclear to me.\n\n3. The description of the experiment is unclear. Lots of details are missing, such as training/test learning curve comparison, learning rate of the optimizer, batch size, and memory costs. Also, the experiment is only run once, and the algorithm's robustness is unclear." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- What is the exact argument for the \"invariant natural gradient\"?\n- Kunstner et al. (2019) explicitly critique the interpretation of Adam as approximate NGD. What is your response to their arguments? 
(E.g., degeneracy of the empirical FIM for overparametrized models, sensitivity to model misspecification, no relationship between empirical and true FIM far from an optimum.)\n- How were the $\\epsilon$ values in the experiment chose?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The argument relating decoupled weight decay to the information-geometric interpretation is interesting. It clarifies that the gradients used to compute $v$ (the diagonal empirical FIM) must be gradients of the log likelihood of a probabilistic model to match the definition of the FIM and therefore must not contain regularizers or auxiliary losses.\n- Averaging the preconditioned gradients (versus preconditioning the averaged gradient) is an interesting variant." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper reiterates and expands the motivation of Adam as approximate natural gradient descent. It derives multiple modifications to the Adam algorithm based on that interpretation. The resulting method (FAdam) is evaluated experimentally." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper presents as an original finding that it \"establishes a mathematical foundation for the Adam optimizer\" in terms of NGD with the empirical Fisher information matrix. This is misleading. This motivation has been given in the original Adam paper and has since been discussed and critiqued in various papers, including but not limited to Kunstner et al. (2019). This should be made transparent in the discussion of related work.\n- The paper states that \"for using natural gradient optimizers [...] the loss function must be in the form of the log-likelihood\". This is not a factual statement and should be adjusted. Preconditioning with the Fisher information matrix adapts to the geometry induced by a certain probabilistic model. The negative log likelihood under said model may be a \"natural\" objective function to optimize, but NGD can meaningfully be applied to any other objective. In fact, in Section 3.4.3, the authors advocate for preconditioning an additional loss term with the FIM. \n- The argument in Section 3.4.1 regarding the use of the square-root on the preconditioner is not stringent. If I am understanding correctly, the argument is that $\\Vert \\nabla J/\\sqrt{f} \\Vert^2_2 \\approx \\Vert F^{-1} \\nabla J\\Vert_F^2$, i.e., preconditioning with the square-root makes the Euclidean norm of the resulting update equal the \"Fisher norm\" of the natural gradient. However, there is no discernible argument why it would be desirable to match these two quantities and, if so, why one would want to achieve this by changing the preconditioner rather than, say, scale the update with a scalar factor? (Minor: The notation should also be improved in Eq. (25) - (27), since $\\Vert\\cdot\\Vert$ is used to refer to both the Euclidean norm and the \"Fisher norm\".)\n- The paper briefly cites Kunstner et al. 
(2019), which is an explicit critique of the interpretation of Adam as NGD, but does not really engage with the arguments in that paper.\n- Overall, the paper combines various components that are somewhat independent of each other:\n a) introduce gradient clipping,\n b) apply momentum after preconditioning,\n c) apply preconditioning to the weight decay gradient.\nIt would be highly desirable to perform ablation studies to understand which of these changes actually matter and how they interact.\n- The quality of the empirical evaluation is a bit lacking. No error bars are given. The hyperparameter tuning protocol is somewhat unclear, e.g., FAdam uses a different epsilon value and it is not stated how this value was obtained." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- Can the authors give a citation regarding using an EMA for Fisher info on line 228? I’m not aware that’s been used prior to Adam.\n- How does the proposed framework justify clipping? In B.3, clipping and epsilon are mentioned through related work, but this step that has been added to the algorithm does not appear to be justified by the theoretical framework.\n- What norms are being used in equations (25)-(27)? I’m assuming the first norm is the one induced by the Fisher Information Matrix, but then what is the other one? Euclidean?\n- Has the author tried to quantify how accurate the approximations (F)Adam uses are in a simple setting? This can help figure out if those approximations are in fact reasonable, which needs to be the case in order to claim it’s really natural gradient in disguise." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The approach of analyzing Adam from a statistical viewpoint is interesting, and while not new (this interpretation was mentioned in the original Adam paper), it could deserve a second look. The authors additionally show some empirical improvements in a few settings." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work takes a statistical viewpoint of the Adam optimization algorithm and attempts to both explain its performance and add improvements through the lens of the natural gradient algorithm. The authors argue that Adam is effectively preconditioning with the diagonal of the Fisher Information Matrix, which leads to the algorithm's superior performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While section 2.1 is likely relevant for doing a detailed analysis of Adam in the proposed framework, as far as I can tell that analysis does not actually take place in the paper or appendix. Given this, this section feels quite out of place to me; I’m unsure of its value for the main message of the paper. 
Most readers in the optimization or statistics community are familiar with Fisher Information Matrix based methods and their connections to second-order Newton-style algorithms, so I’m unsure of the value of introducing them using ideas from differential geometry on manifolds.\n\nThroughout this paper it is repeatedly claimed that Adam is preconditioning with the diagonal of the Fisher Information Matrix, but the approximation used in Adam is not the same thing in general. Adam has been connected to second-order-like or natural-gradient-like algorithms, but it is known in general that the gradient squared is not an approximation to the diagonal of the Fisher Information Matrix. While it is in expectation, in the finite-data regime there are clear counterexamples to this, such as in [1], which the author cites.\n\nOn line 250 the authors claim that Adam excels in classification tasks such as next token prediction, but this seems somewhat contradictory to the previous line, where it is claimed that CNNs are often better when trained with SGD. I agree that in many vision tasks SGD matches Adam in performance, but what is left out of the text is that this is true in most classification problems in vision, which is again a discrete output space. I’m additionally not sure of the strength of the claim that Adam is less strong in the generative setting. I’m aware of works such as [2] that claim the opposite (which the author cites), attempting to answer the question “What factors explain that Adam produces better quality solutions than SGDA when training GANs”, and propose modifications to SGD to help it compete with Adam.\n\nOverall, several assertions are made that Adam fails on continuous regression targets, but I feel there is not sufficient citation or experimentation to back that up. Adam excelling in discrete output spaces (which again is not always true; training ResNets with SGD is still very common) is not the same thing as Adam failing on continuous tasks, and this needs to be justified if it is being claimed. Examples counter to this idea exist in the literature, such as the quadratic function minimized in figure 6 of [3], where Adam handily outperforms gradient descent.\n\nThe notation of some of the equations is a bit unclear; for example, in equation 15, while I understand the division is coordinate-wise, this should be explicit in the notation, otherwise a less familiar reader may think we’re trying to divide a vector by another vector, which is ill-defined.\n\nCosmetically, the citation style is very non-standard and makes reading difficult; I would suggest the authors use a more standard method of in-text citation.\n\nMinor, but the Adam algorithm written in B.5 is in fact AdamW and has clipping added, which was not included in the original algorithm.\n\nThe central weakness of this paper, in my opinion, is that it misunderstands how approximate the approximations in Adam are. The idea of the Adam update being connected to the diagonal of the Fisher information matrix is not new; it was mentioned in Kingma and Ba (2014). The optimization community has tried very hard to understand why Adam works (another weakness of this paper is that there is no related work regarding the vast amount of research into understanding Adam) and this approach has not appeared to yield progress. 
The authors acknowledge the significance of these approximations in appendix B.3, but given the amount of work showing that these approximations are often very poor I don’t think the community can comfortably understand Adam as a natural gradient algorithm.\n\n\n[1]\nFrederik Kunstner, Lukas Balles, Philipp Hennig\n\nLimitations of the Empirical Fisher Approximation for Natural Gradient Descent\n\nhttps://arxiv.org/abs/1905.12558\n\n[2]\nSamy Jelassi, Arthur Mensch, Gauthier Gidel, Yuanzhi Li\\\n\nAdam is no better than normalized SGD: Dissecting how adaptivity improves GAN performance\n\nhttps://openreview.net/pdf?id=D9SuLzhgK9\n\n[3]\nFrederik Kunstner, Robin Yadav, Alan Milligan, Mark Schmidt, Alberto Bietti\n\nHeavy-tailed class imbalance and why adam outperforms gradient descent on language models\n\nhttps://arxiv.org/pdf/2402.19449" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Please see the weaknesses for questions and improvements." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The authors have identified an important and interesting connection between the success of the Adam optimization method and optimization by the natural gradient method.\n2. The authors proposed an explanation for why Adam's advantages primarily emerge in problems with discrete distributions.\n3. The authors established principles for using momentum, weight decay, and clipping in optimization with invariant gradients. Based on this analysis, they proposed a new method — FAdam, which demonstrates improved performance compared to traditional Adam." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors proposed a connection between the Adam optimizer and natural gradient optimization, treating the moving average of squared gradients in Adam as an estimate of the diagonal elements of the Fisher information matrix. They hypothesised that Adam's advantage over other methods might be due to its use of natural gradients; the advantage is particularly noticeable in tasks with discrete distributions, since they allow for a tighter approximation of the Fisher matrix.\n\nThe authors also offered a justification for the necessity of normalization by the square root of squared gradients to ensure basis invariance when averaging gradients in Adam. Additionally, they analyzed how momentum, weight decay, and clipping should function in the context of natural gradients and proposed new variants of Adam and Adafactor — FAdam and FAdafactor. The proposed FAdam method demonstrates superior performance for models like LLMs and VQ-VAEs, as well as in ASR tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. 
The authors did not provide an analysis to assess the accuracy of the approximations and simplifications used in this method.\n \na) Why is the transition from sampling from $p(x|\\theta)$ to sampling from $p_{data}$ valid? In the case of an undertrained model, the distribution $p(x|\\theta)$ can differ significantly from the marginalized $p_{data}$ distribution. \n \nb) How accurate is the transition from Eq. (20) to Eq. (21), and why is it not critical to the method's effectiveness?\n \nc) How accurate is the FIM approximation throughout the hundreds of optimization steps for EMA? \n\nIf the authors could provide ablation experiments comparing Adam, FAdam, a true natural gradient method, and other methods incorporating intermediate transitions on simple tasks (e.g., CIFAR-10), it would significantly increase confidence in the results.\n\n2. In the theoretical justification for preferring discrete distributions: due to uniform sampling from $p_{data}$, discrete distributions can also provide a poor approximation of the FIM. This is because the concentration of the distribution may lie in false logits, which is common in yet-not-fully-trained networks. The score might not be large enough to yield a good approximation.\n\n3. While Amari et al. (2019) prove that unit-wise block diagonal FIM has off-diagonal blocks smaller by $\\frac{1}{\\sqrt{n}}$, the authors' interpretation appears to extend beyond the original result. Their derived claim about individual diagonal weights dominating off-diagonal weights by $\\frac{1}{\\sqrt{n}}$ (lines 186-188) may need additional justification, as it's not directly supported by Amari's work.\n\n4. The absence of confidence intervals in the experimental results prevents one from being fully certain of FAdam's superiority, given the marginal score improvements. Additionally, providing further experiments on a broader range of domains would strengthen the evidence of the proposed method's improved performance.\n\n5. Minor typos: in Eq. (25), it should be the square of the norm; in Eq. (19), \"approx.\" should replace the equality sign.\n\nTo summarize, I believe this paper relies too heavily on unjustified approximations and is not yet ready for the conference. However, if the authors provide additional experimental and theoretical validation, I might increase my score." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "I suggest the authors rethink the major contribution of this paper. As a paper to propose a new optimizer, it might be better to first introduce the new algorithm and present the pros of this algorithm (these could be empirical results or theoretical guarantees). Although I understand some basic illustrations about the preliminary knowledge or motivation of some terms are necessary, at least for this paper, I believe the discussion in the current manuscript should be refined. 
For example, I do not get any interesting insights from the discussion about the connection between the log-likelihood of the Gaussian distribution and the $\\ell_2$ loss, as it is basic knowledge in statistics and does not seem deeply connected to FAdam." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper provides a comprehensive discussion of the previous works, and the empirical results seem supportive." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors provide an explanation of the second-order moment $v_t$ of Adam from the perspective of the diagonal Fisher information, and propose a new optimizer, FAdam, by utilizing this perspective." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "At least from my perspective, I cannot grasp the main contribution of this paper. I feel it is more like a technical report or a review instead of a paper. I will list my concerns as follows:\n\n1. The main conclusion of this paper is somewhat unclear, and the writing is difficult to follow. From my understanding, the authors aim to claim that Adam is a variant of natural gradient descent and introduce a new optimizer, FAdam, as I outlined in the summary. However, they spend over half of the paper discussing basic statistical properties and formulas related to Fisher information, along with extensive reviews of previous works, without presenting their own results or conclusions. In contrast, the descriptions of the algorithms and the theoretical convergence results are glossed over. While I acknowledge that some discussion of prior works is necessary, I believe it should be integrated with the proposed methods and conclusions of this paper. In summary, the lengthy review of existing literature and preliminary knowledge renders the current manuscript confusing and unappealing.\n\n\n2. The technical contribution of this paper is relatively insufficient. As a work proposing a new optimizer, the authors fail to provide a rigorous theoretical guarantee of convergence. The current version's convergence analysis disregards the effects of momentum, and even this incomplete result is derived directly from another paper. Furthermore, the statement that _“Since FAdam’s momentum is analogous to Polyak momentum, FAdam’s momentum also tightens the convergence bound. Therefore, the convergence bound for the natural gradient without momentum is looser than the convergence bound for FAdam,”_ is presented without adequate justification and is not convincing. It is unreasonable to assert that the convergence bound of one optimizer is looser than that of another without rigorous derivation." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "This paper thoroughly analyzes the Adam optimizer, connects it to natural gradient descent, and proposes an improved version called FAdam. FAdam outperforms Adam in text, speech and image domain tasks, including achieving SoTA in speech recognition." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024fadam,\ntitle={{FA}dam: Adam is a natural gradient optimizer using diagonal empirical Fisher information},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4ihkxIeTFH},\nnote={under review}\n}" }, "abstract": { "value": "This paper establishes a mathematical foundation for the Adam optimizer, elucidating its connection to natural gradient descent through Riemannian and information geometry. We rigorously analyze the diagonal empirical Fisher information matrix (FIM) in Adam, clarifying all detailed approximations and advocating for the use of log probability functions as loss, which should be based on discrete distributions, due to the limitations of empirical FIM. Our analysis uncovers flaws in the original Adam algorithm, leading to proposed corrections such as enhanced momentum calculations, adjusted bias corrections, and gradient clipping. We refine the weight decay term based on our theoretical framework. Our modified algorithm, Fisher Adam (FAdam), demonstrates superior performance across diverse domains including LLM, ASR, and VQ-VAE, achieving SoTA results in ASR." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Optimizer", "Adam", "Natural gradient descent", "Second order optimization", "Information geometry", "Riemannian geometry", "Differential geometry", "Tensor calculus", "Deep learning", "Fisher Information", "Hessian", "Curvature" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/360dece4d0cbf2cddc766b4f838ee54e42d3064b.pdf" }, "presentation": null, "primary_area": { "value": "optimization" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/4ddd42b53a3a1d50ac36a41139804eee4c5478f1.zip" }, "title": { "value": "FAdam: Adam is a natural gradient optimizer using diagonal empirical Fisher information" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4ikjWBs3tE
Transformers Learn Low Sensitivity Functions: Investigations and Implications
main
Active
transformers;sensitivity;grokking
interpretability and explainable AI
3;5;8;8
4;3;3;2
2;1;4;3
2;3;3;3
3;3;4;3
6
3
2.5
2.75
3.25
-0.833333
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- What's the difference between sensitivity and adversarial/robustness that looks at neighborhoods?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- Useful formalization of sensitivity \n- Interesting findings about low sensitivity, robustness, and sensitivity to different parts of the input (like last token in a sequence)\n- variety of tasks for better understanding of where architecture properties come from\n- connecting between grokking and sensitivity provides a new lens into understanding and improving DNN training." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper considers implicit biases for the transformer architecture. They describe sensitivity of a function as the change in the function value averaged over all possible element-wise changes to the function input, averaged or maxed over all inputs on a hypercube. Such functions can be described using polynomials with the sensitivity connected to the degree of the polynomial. The paper proves that a linear attention transformer is biased (in the eigenvalue of the NTK sense) toward low-sensitivity functions characterized by the degree. Then they go on to generalize the notion of sensitivity for neighborhoods of general (non-boolean) inputs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- On line 141, \"where the eigenvalues are non-decreasing with the degree of the multi-linear monomials\" would be easier if it said \"eigenvalues do not decrease as the degree increases.\"" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Have you compared input sensitivity and perturbation robustness at test time? When if ever do they behave differently?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The study in the paper is quite intriguing. 
A few things I liked:\n * Provides a new lens on what is different about transformers\n * Demonstrates phenomena consistently across many datasets\n * Provides a new lens on grokking not captured by the weight norm" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work builds on the theoretical notion of Boolean sensitivity, extending it to an empirically measurable quantity and studying it for the case of transformers. It finds that transformers have lower input sensitivity on the training data, compared to other architectures, and that this is correlated with other phenomena such as test-time robustness, sharpness of minima, and grokking." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "There were a few key places where I felt the paper overclaimed or made dubious claims, which are enough for me to not favor acceptance. In particular:\n * Lower sensitivity leads to robustness: this is basically a restatement of the claim that Gaussian data augmentation improves robustness. This is a very well-known result; the authors do say that it is in line with other results in the literature, but I feel they are understating the extent to which this is well-trodden ground (for instance, Hendrycks, one of the authors of CIFAR-10-C, has published an entire line of work on data augmentation methods for improving robustness; Gaussian noise is the simplest of these and many others work far better).\n * Perhaps more importantly, this sentence does not seem merited: \"Together, these results indicate that the inductive bias of transformers to learn functions of lower sensitivity *explains* the improved robustness (to common corruptions) compared to CNNs.\" I am not sure what \"explains\" means, but there are many other interventions that improve robustness (such as the data augmentation methods mentioned above), and some of those might have better explanatory power.\n * It is not entirely clear whether input sensitivity is a *different* phenomenon than test-time robustness to perturbations. The main difference is that it is computed on the training set instead of the test set --- but are there cases where these come apart, or are test-time and train-time input sensitivity always highly correlated?\n * I think the results could be interesting either way -- but if they are the same, then this is interesting mainly because it is a proxy for robustness that can be computed at training time; if they are different, then understanding the differences would be interesting." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "No ethics concerns." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "1. Related to weakness 1, how do you think the sensitivity and robustness relate to expressivity and performance in transformers?\n\n2. Lines 310-311 mention generalization capabilities of different models as a reason to investigate sensitivity during training. 
This made me curious: how do you think generalizability in representation learning or classification relates to generalizability in sensitivity (I think one direction of it is clear, but the other direction is not)?\n\n3. In line 394, you mention that you use the same number of layers for LSTM and RoBERTa for fair comparison. How about the model size in terms of number of parameters? How many parameters are in each model? And how do you think changing this could impact your results?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "**Key strength:** In addition to the general importance of developing a rigorous understanding of how transformers work and why they show such remarkable properties, this paper proposes a novel perspective by looking into sensitivity. They rigorously define sensitivity and provide strong arguments on how it links to other important properties, such as robustness and generalization. They also show that it can track progress when grokking happens, which I think is an important finding and could potentially enable a series of future studies on grokking.\n\n**Other strengths:** Here is a list of other points that I commend the authors for:\n- The introduction is quite well written and motivates the main question quite well (though it could be improved; see weaknesses). Similarly, the contributions are well explained at the end of the introduction.\n- The presentation of the paper is strong, and maintains a good balance between accessibility and rigor.\n- Propositions 2.1 and 2.2 are really interesting results on the spectral bias and sensitivity of transformers. \n- The authors explain the implications of their theory quite well.\n- The experimental design is thorough and well-tailored to validating the theory.\n- While I consider this a theoretical paper, the experiments are quite strong and cover various aspects of the paper’s main questions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper explores the inductive biases of transformers, particularly their tendency to learn low-sensitivity functions. It introduces sensitivity as a measure of how model predictions respond to token-wise perturbations in the input. By comparing transformers to other architectures like MLPs, CNNs, ConvMixers, and LSTMs across both vision and language tasks, the paper shows that transformers consistently exhibit lower sensitivity. This low-sensitivity bias is linked to improved robustness, flatter minima in the loss landscape, and hence better generalization. Additionally, the authors propose that sensitivity could act as a progress measure in training, and is linked to grokking." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I do not see any major weakness, but there could be some improvements. See my suggestions for improvement below.\n1. While the paper clearly explains that lower sensitivity is linked to higher robustness, the trade-off/connection with expressivity and performance is not discussed. There is a well-established trade-off in various contexts (see, e.g., [1-2]), and it would further strengthen the paper to discuss this.\n\n2. Though I think the introduction is quite well-written, I think it under-emphasizes the findings of the paper on the role of sensitivity analysis. 
The authors conduct a rigorous analysis of the transformers' sensitivity and use that to clarify some of the important properties of transformers, as I mentioned in the strengths, but while doing so, they also show, quite rigorously with strong theory and experiments, how sensitivity analysis could be used to understand generalization, grokking, etc. Near the end of the paper this realization caught my attention, and the authors actually do point this out more clearly in the Conclusion, but I think this can be better emphasized in the Introduction.\n\n3. I suggest the authors bring the Limitation section from the appendix to the main paper. The limitations are not discussed in the main paper, while it is always important to discuss them.\n\n4. This is a rather minor point and it might be a matter of taste: Do sections 5 and 6 really need to be separate sections? It seems like the findings are generally similar, and they could be merged into one section of empirical analysis of vision and language models.\n\n\n**References**\n\n[1] Zhang, H., Yu, Y., Jiao, J., Xing, E., El Ghaoui, L., & Jordan, M. (2019, May). Theoretically principled trade-off between robustness and accuracy. In International conference on machine learning (pp. 7472-7482). PMLR.\n\n[2] Raghunathan, A., Xie, S. M., Yang, F., Duchi, J., & Liang, P. (2020). Understanding and mitigating the tradeoff between robustness and accuracy. arXiv preprint arXiv:2002.10716." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "* Following up on my previous comment, the authors should clarify if the noising procedure is applied on patches (pixels) or token representations. Fig. 1 contradicts the text.\n * Also, how is noising applied on CNNs?\n\n* How is $\\sigma$ important, and why were different $\\sigma$ chosen for the experiments in Section 4? I personally find it a drawback that one needs to find the right $\\sigma$, and that the conclusions might change when using different ones. Also, bold claims are provided with a single (different) sigma per dataset, which raises some questions.\n\n* ViT/LLMs might produce different token scales, but $\\sigma$ is kept fixed. This can strongly impact some tokens and leave others almost noise-less. I find this also a negative point of this algorithm, since some “topics” might bypass the noising.\n\n* How is a single attention layer representative of a large Transformer in Section 3.1? I would ask the authors to elaborate on this.\n * Additionally, why have a linear layer $U$ after another linear layer $W_v$, since the composition of both is already a linear layer?\n\n* In Fig. 4, only the training accuracy is provided. What about the test accuracies? It is known that models achieve perfect train accuracy, but the test accuracy might be very different. 
Does the test accuracy correlate with the sensitivity (measured on train data as you already do)?\n\n* About the claim in L347 *_“This shows that the observations on small-scale models studied in this section transfer to large-scale pretrained models.”_*. By increasing scale, the sensitivity of a conv model has gone down to 0.0342, which is much lower than 0.0829 for ResNet-18. Also, ViT went up from 0.0014 to 0.0191. It would be fair to conclude that scaling up brings sensitivities closer, which would mean that small-scale does not transfer to large-scale. Also, one could go much larger in scale (larger ViT, larger datasets) and see if the trend is still maintained or sensitivities are even closer. \n\n* Claims in Section 5 are obtained with one language model (RoBERTa) and one LSTM on 2 small datasets. I cannot agree with the claims being generic for *Language Tasks* with this setup. Moreover, current LLMs have much different properties than the LMs used in 2019 (RoBERTa)." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "**Originality:**\n\nFocusing on sensitivity starting from the Boolean formulation is original. I also found the experiment on a synthetic vocabulary (3.1) original.\n\n**Clarity:**\n\nThe paper is well written, with clear language. The mathematical notation and formulation are also easy to read.\n\n**Significance:**\n\nThe study of sensitivity in current models is important for interpretability as well as to design better training strategies." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work studies the sensitivity of functions defined by different deep learning architectures, comparing the specific case of Transformers with CNNs and Mixers. The work stems from previous work that has studied sensitivity with Boolean inputs, and derives a formulation for token-based models. The authors make a connection between sensitivity and robustness, show how ViTs are less sensitive than other architectures and also show how sensitivity can be used for grokking analysis.\nExperiments on synthetic data are provided, as well as experiments using ViT on small datasets (CIFAR, SVHN, ImageNet) and LLMs on 2 datasets (MRPC and QQP)." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "**Originality:**\n\nWhile the study of sensitivity has its originality, many previous works have studied sensitivity in many ways, for example by understanding the effects of image augmentations (see the contrastive learning literature).\n\n**Quality:**\n\nThe experiments provided are either synthetic or use small models/datasets. This makes the claims in the paper weaker in my opinion. For example:\n* Results in Section 3 use synthetic data and a single attention layer. I would argue that, while still interesting, these experiments might not transfer to full models with several layers and multiple attention heads.\n * Related to this experiment, other research has been carried out analyzing spurious correlations. For example, the work by Robert Geirhos (among others) has already shown that CNNs tend to learn from the easiest cues available. In the experiments in Section 3.1, these “easy” cues would be the sparse tokens. Once they become uninformative, the next available (but harder) cue is the frequent tokens. \n\n> Geirhos, Robert, et al. 
\"Shortcut learning in deep neural networks.\" Nature Machine Intelligence 2.11 (2020): 665-673.\n\n> Geirhos, Robert, et al. \"ImageNet-trained CNNs are biased towards texture; increasing shape bias improves accuracy and robustness.\" arXiv preprint arXiv:1811.12231 (2018).\n\n* Results in Section 4 use small datasets (CIFAR, SVHN) and arguably a medium size dataset nowadays (ImageNet). The models used (Vit-simple/small) are far from real scenarios nowadays, and the compared architectures are also small (3-layer CNN in for example).\n\n* Results in Section 5 use a Roberta model (2019) which does not have the same properties as current LLMs. Also, this model is trained from scratch on small tasks, which also does not transfer to current abilities of LLMs.\n\nIn several cases, bold conclusions are extracted from a single model / single dataset experiment, with which I cannot agree. For example, the claim in L357 *_\"Thus, transformers learn lower sensitivity functions compared to MLPs, ConvMixers, and CNNs\"_* is validated with a 3-layer CNN on a small dataset like SVHN.\n\n**Clarity:**\n\n* It is not clear how the noising strategy is performed. The text mentions that _tokens_ are polluted with noise, however Fig 1 shows the noise applied to the pixel patch and says *_\" the original image and the corrupted image are fed into the same neural network\"_* (which implies that noise is applied at pixel level). The authors should clarify this important aspect.\n\n* It is also not clear how noising is applied to CNNs (which are not patch/token based).\n\n* Proposition 2.1 is harder to parse than the rest of the text, and it is hard to understand why it is important for the paper.\n\n**Significance:**\n\nWhile the objective of the paper is significant, the results provided and the size of the experiments laregly diminish the impact of this work." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Transformers have lower sensitivity than alternative architectures, such as LSTMs, MLPs, ConvMixers, and CNNs. Low-sensitivity bias correlates with improved robustness and can serve as a progress measure for grokking." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024transformers,\ntitle={Transformers Learn Low Sensitivity Functions: Investigations and Implications},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4ikjWBs3tE},\nnote={under review}\n}" }, "abstract": { "value": "Transformers achieve state-of-the-art accuracy and robustness across many tasks, but an understanding of their inductive biases and how those biases differ from other neural network architectures remains elusive. In this work, we identify the sensitivity of the model to token-wise random perturbations in the input as a unified metric which explains the inductive bias of transformers across different data modalities and distinguishes them from other architectures. We show that transformers have lower sensitivity than MLPs, CNNs, ConvMixers and LSTMs, across both vision and language tasks. We also show that this low-sensitivity bias has important implications: i) lower sensitivity correlates with improved robustness; it can also be used as an efficient intervention to further improve the robustness of transformers; ii) it corresponds to flatter minima in the loss landscape; and iii) it can serve as a progress measure for grokking. 
We support these findings with theoretical results showing (weak) spectral bias of transformers in the NTK regime, and improved robustness due to the lower sensitivity." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "transformers", "sensitivity", "grokking" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/4b6bfa86275305287eeab30ae0320b2dcb5c1124.pdf" }, "presentation": null, "primary_area": { "value": "interpretability and explainable AI" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Transformers Learn Low Sensitivity Functions: Investigations and Implications" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4j9plQoOH1
LongViTU: Instruction Tuning for Long-Form Video Understanding
main
Withdraw
vision language models;instruction-tuning;long-form video understanding
datasets and benchmarks
Rujie Wu;Xiaojian Ma;Hai Ci;Yue Fan;Yuxuan Wang;Haozhe Zhao;Qing Li;Yizhou Wang
~Rujie_Wu2;~Xiaojian_Ma1;~Hai_Ci1;~Yue_Fan2;~Yuxuan_Wang4;~Haozhe_Zhao1;~Qing_Li1;~Yizhou_Wang1
3;5;5;6
4;4;4;4
3;2;3;3
2;2;2;2
2;3;2;2
4.75
4
2.75
2
2.25
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": { "value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors." } }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper is easy to follow, and the experiments are clearly described.\n- The dataset is of high quality, featuring a large number of QA pairs and encompassing a variety of diverse scenarios." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces LongViTU for video understanding, which comprises approximately 121k question-answer pairs across 900 hours of video content, focusing on long-context videos that require rich knowledge and reasoning. The authors propose a hierarchical pipeline for generating high-quality QA pairs with explicit timestamp labels, catering to diverse real-world scenarios. LongViTU is curated to support fine-grained and open-ended QA. The paper also presents experiments demonstrating the performance gap between open-source and commercial models on this benchmark and the effectiveness of SFT on LongViTU." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Figure 1: The icons, while visually appealing, come across as unprofessional and occupy space that could be better utilized to present more information.\n- Ablation Studies: The paper lacks ablation studies for different-level captions. 
For instance, it would be beneficial to know if event-level captions can be skipped without significant detriment.\n- Results: Additional results are necessary to clarify the performance of different Multi-modal Large Language Models (MLLMs) on LongViTU videos with varying durations.\n- Comparison with ShareGPT4Video[1]: The authors of ShareGPT4Video present a progressive framework that generates detailed captions for diverse videos. In contrast, LongViTU focuses solely on ego-centric videos due to its dependence on human annotation, which potentially limits its application and robustness for general QA, as evidenced in Table 3.\n\n---\nReference:\n\n[1] Chen, Lin et al. “ShareGPT4Video: Improving Video Understanding and Generation with Better Captions.” ArXiv abs/2406.04325 (2024): n. pag." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. How were the specific parameters for the sliding window (five segments) determined? What is the sensitivity of the results to changes in this parameter?\n2. What is the inter-annotator agreement (IAA) for the human annotations used in the Ego4D dataset, and how does this affect the quality of LongViTU?\n3. What are the computational costs associated with generating and processing LongViTU?\n4. Can you provide a more detailed analysis of the biases present in the generated QA pairs?\n5. How does the performance of the fine-tuned models change with different sizes of the LongViTU training set?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. LongViTU explicitly addresses the limitations of temporal context, length, and fine-grained question types from the perspective of sft. The hierarchical pipeline for automatic dataset generation is a sound procedure to create long-form annotations from bottom to top. Its sheer scale of the dataset (~900 hours of video) and its diversity in terms of scenarios and question types are decent. The use of Ego4D ensures real-world relevance.\n2. The paper includes a thorough quantitative evaluation on LongViTU and several benchmark datasets, demonstrating the effectiveness of the dataset and highlighting the challenges it presents. The use of GPT-4 for scoring is a reasonable approach given the open-ended nature of the QA pairs. Qualitative examples further illustrate the dataset's capabilities. The availability of the dataset, fine-tuned models, and code is a valuable contribution to the community." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper introduces LongViTU, a novel large-scale dataset (~121k QA pairs, ~900h videos) for long-form video understanding. 
The authors address the limitations of existing video question-answering (VQA) datasets by focusing on several key aspects: diverse real-world scenarios (leveraging Ego4D), explicit timestamp labels for QA-related events, long average certificate length (4.6 minutes), fine-grained categorization of QA pairs (spatiotemporal understanding, episodic reasoning, commonsense inference), and open-ended, precise QA generation. A hierarchical pipeline, employing LLMs (primarily GPT-4) at multiple stages (hierarchical video tree construction, long-form QA generation, self-revision), is used for automatic dataset creation. Experiments demonstrate the challenges posed by LongViTU to existing video language models (VLMs), showing a performance gap even between open-source and commercial models. Fine-tuning on LongViTU improves performance on both in-distribution and out-of-distribution benchmarks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The reliance on LLMs (GPT-4) throughout the pipeline raises concerns about potential biases inherited from the pre-training data of these models. Moreover, a hierarchical pipeline may cause error cumulation, making the bias even worse. A thorough analysis of potential biases in the generated QA pairs is missing. \n2. While self-revision is employed, a more robust human evaluation of the dataset quality would strengthen the paper's claims. The current human evaluation seems limited to Appendix B.\n3. Experiments need improvements. The number of models evaluated in the benchmark is too limited, and some of the current long video large language models, such as LongVA, LongVILA, have not been included in the evaluation. The model performance used to validate the training dataset's effectiveness is too weak (for instance, LLama-VID performs below random chance on VideoMME), and the improvements achieved after fine-tuning are relatively minor." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1.\tIs it possible that using GPT-4 for evaluation may struggle to distinguish fine-grained semantics? For instance, if sentences differ by only one or two keywords but convey significantly different meanings, how would GPT-4 rate them in such cases?\n\n2.\tCan LongViTU still deliver substantial performance improvements on models that perform better?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1.\tThe approach of organizing video content into a hierarchical tree structure is innovative. 
This method allows for the generation of question-answer pairs that capture both spatial and temporal details, which is a creative extension of existing video understanding frameworks.\n2. The dataset provides fine-grained categorization of questions, which is crucial for advancing the understanding of complex video content and adds depth to the quality of the dataset." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces LongViTU, a large-scale dataset designed for long-form video understanding, featuring approximately 121k question-answer pairs across 900 hours of video content. It addresses challenges in long-form video understanding by offering a dataset with diverse real-world scenarios, explicit timestamp labels, long certificate lengths, fine-grained categorization, and open-ended precise QA pairs. LongViTU is curated to facilitate instruction tuning for long-form videos, involving the organization of video content into a hierarchical tree and incorporating self-revision mechanisms to ensure high-quality QA pairs. The authors primarily validate the effectiveness of LongViTU through experiments conducted on two different models." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.\tIn Table 2, it can be observed that there is a lack of differentiation in the benchmark. The performance gap between the best-performing Gemini-1.5-Pro and the other models is not evident. According to the reviewer, in most existing benchmarks, Gemini-1.5-Pro demonstrates a significant performance advantage over Video-LLaVA.\n\n2.\tThe proposed benchmark employs GPT-4 for assessment, which may introduce additional bias. \n\n3.\tThe validation method employed was released some time ago, and its baseline performance is no longer highly competitive compared to more recent models. It remains unclear whether it can still deliver significant performance improvements on more recently proposed models." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please see the weaknesses section." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1 Topic is good. Long-form video understanding is a challenging but important problem. Developing a benchmark for instruction tuning and evaluation is critical for this problem.\n\n2 Experiments are sufficient. The experimental studies are interesting and show the challenges and potential of this benchmark." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this work, the authors propose a LongViTU benchmark for Long-Form Video Understanding. Basically, they leverage Ego4D as the data source, and develop a three-stage pipeline for QA annotation and revision. 
First, it builds up a hierarchical video tree to describe videos at different temporal scales. Second, they apply a sliding-window approach to each subtree and generate QA pairs for the subtree with GPT-4. Third, they use GPT-4 to make a thorough revision of the generated question-answering pairs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1 This benchmark is based on Ego4D. Hence, the annotation would be similar to EgoTaskQA. As shown in Table 1, the difference is the increased scale of the dataset and the newly added timestamp annotations. Is such timestamp annotation important or not? Are there any experimental results to show its impact on your benchmark?\n\n2 The hierarchical video-tree-style design is similar to [MoVQA: A Benchmark of Versatile Question-Answering for Long-Form Movie Understanding, arXiv:2312.04817]. \n\n3 The paper writing should be refined. The structure is OK, while the content is not quite easy to read." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We propose a large-scale instruction-tuning dataset for long-form video understanding." }, "_bibtex": { "value": "@misc{\nwu2024longvitu,\ntitle={LongVi{TU}: Instruction Tuning for Long-Form Video Understanding},\nauthor={Rujie Wu and Xiaojian Ma and Hai Ci and Yue Fan and Yuxuan Wang and Haozhe Zhao and Qing Li and Yizhou Wang},\nyear={2024},\nurl={https://openreview.net/forum?id=4j9plQoOH1}\n}" }, "abstract": { "value": "This paper presents LongViTU, a large-scale (~121k QA pairs, ~900h videos), automatically generated dataset for long-form video understanding. Our key idea is inspired by the success of Large Language Models (LLMs) and Multimodal Language Models (MLMs) that are fueled by machine-generated instruction-following data (*e.g.*, InstructGPT, LLaVA). We developed a *systematic* approach to produce massive question-answering pairs tailored to virtually unbounded long videos by organizing them into a ***hierarchical tree***, incorporating ***self-revision*** mechanisms to guarantee high quality. We curate LongViTU so that each QA pair: 1) involves a long context (average *certificate length* of 4.6 minutes); 2) requires rich knowledge and condensed reasoning (commonsense, causality, planning, *etc.*); 3) explicitly labels the timestamps of relevant events throughout the entire video. Furthermore, LongViTU provides a benchmark to facilitate future research in instruction-following for long-form videos. Our experiments first reveal the performance gap between open-source video MLMs and their commercial counterparts (*e.g.*, Gemini-1.5-Pro) on this benchmark. Supervised Fine-Tuning (SFT) on open-source models led to Video-LLaVA achieving the best performance, with a GPT-4 score of $50.7$, closely following $52.3$ by the leading closed-source model Gemini-1.5-Pro, underscoring the substantial challenge posed by our benchmark. Further SFT on LongViTU with Video-LLaVA resulted in improvements of $30.7$% on the In-Distribution (ID) benchmark EgoSchema; $12.9$% and $0.6$% on the Out-of-Distribution (OOD) benchmarks WorldQA and VideoMME, respectively. These outcomes demonstrate the effectiveness and robust OOD generalizability of our proposed instruction-tuning scheme for long-form video understanding. The dataset, SFT models, and code are publicly available on the anonymous page [LongViTU](https://longvitu.github.io)." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": { "value": [ "~Rujie_Wu2", "~Xiaojian_Ma1", "~Hai_Ci1", "~Yue_Fan2", "~Yuxuan_Wang4", "~Haozhe_Zhao1", "~Qing_Li1", "~Yizhou_Wang1" ] }, "authors": { "value": [ "Rujie Wu", "Xiaojian Ma", "Hai Ci", "Yue Fan", "Yuxuan Wang", "Haozhe Zhao", "Qing Li", "Yizhou Wang" ] }, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "vision language models", "instruction-tuning", "long-form video understanding" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": { "value": "wu|longvitu_instruction_tuning_for_longform_video_understanding" }, "pdf": { "value": "/pdf/e663a2eb9e041444826a666f95acc8764c6e736b.pdf" }, "presentation": null, "primary_area": { "value": "datasets and benchmarks" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "LongViTU: Instruction Tuning for Long-Form Video Understanding" }, "venue": { "value": "ICLR 2025 Conference Withdrawn Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Withdrawn_Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4jBJ6JphYM
Procedural Fairness Through Addressing Social Determinants of Opportunity
main
Active
Procedural Fairness;Social Determinants of Opportunity;Causal Fairness;Structural Justice
alignment, fairness, safety, privacy, and societal considerations
3;3;5
4;4;2
3;2;3
1;2;2
2;2;3
3.666667
3.333333
2.666667
1.666667
2.333333
-1
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "What are the authors thoughts of the critical reflection of the approach (see weaknesses)?\nWhy is this paper a good fit for specifically ICLR?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The discussion of the actual effects of different approaches to achieve \"fairness\" is discussed, which is often not considered enough in our field." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper discusses the consideration of \"social determinants of opportunity\" such as geographical locations for algorithmic fairness." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "First of all, I am not sure if this conference is a good fit for this paper/topic since it is from my perspective hardly at all concerned with \"learning representation\". It is more a general societal consideration about how fairness could be achieved.\n\nWhile the claim of the paper is to discuss the \"social determinants of opportunity\" in general, the discussion focusses very much on a single use case, i.e., university admissions.\n\nThe paper is written in a very US-centric way, specifically considering the legal situation.\n\nThe case considerations in Section 4 often (e.g., Section 4.3.) come to conclusions that are quite trivial. E.g., that taking the top-x % per region increases the share of \"weaker\" regions was literally my first thought. The accompanying formulas appear to just make a trivial insight more sophisticated.\n\nThe authors should more critically reflect on their approach. For example,\n(i) even if \"academic preparedness\" is caused by certain external factors, isn't academic preparedness still a key factor to a succesful university curriculum? If someone is not well prepared for university, they should not be admitted - that should be at the core of all admission procedures\n(ii) the legal implications of adjusting for \"social determinants of opportunity\" should be considered, specifically if this correlates with sensitive attributes such as race. \n(iii) trying to form groups again beyond sensitive attributes - again - introduces new sources of unfairness. For example \"poor\" students growing up in \"rich\" regions. Also, if this kind of admission procedure would gain traction, it would also be possible to trick procedures, e.g. for \"rich\" people renting temporarily to appear to be from a \"poor\" region.\n\nThe assumptions in the paper appear to be somewhat arbitrary. E.g., why assume the gamma parameterization in 4.3., and how is this justified?\n\nThe writing of the paper should also be improved. For example, what is the purpose of Section 2.1. For the contents of the paper?" 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Are the regions in the experiment latent? Region 1 and 3 are almost identical, calling into question the identifiability of the model. Also, if regions don’t correspond to geographies or social networks then how can they capture social determinants of opportunity? \n\nNotes:\n\n> “Specifically, by definition of causality, this edge asserts that there is a difference in the distribution of education status, when we “intervene” on individual’s race while keeping all other things unchanged”\n\n“all other things” meaning every other variable in their causal graph, not literally every other possible thing. Since these graphs typically only use a handful of features, I don’t think this edge is an endorsement of racial essentialism - it just summarizes dozens of effects that the model is too coarse to model explicitly.\n\n>“If a certain edge or path in the causal model does not reflect an actual real-world causal process, subsequent causal fairness analyses based on causal effects may not provide informative conclusions.”\n\nThis is certainly true, but to my knowledge not a single causal graph in history has ever actually described a real-world causal process where humans were involved. It’s extremely difficult to establish single treatment effects, let alone a network of them." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is clearly arranged and easy to follow. This paper is also laudable for trying to raise the salience of geographic and community influences on opportunity over a reductive focus on ethnicity." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper develops a model of the interactions between ethnicity, academic preparedness, and \"social determinants of opportunity\", which capture socio-geographic influences on academic preparedness. The model is used to study different college admissions policies both in theory, and applied to a dataset of UC Berkeley admissions." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Ultimately, the theoretical results in this paper do not provide novel insight, and the empirical results aren't very interesting or plausible. The model is used to show that:\n\n1. Quota-based affirmative action harms disadvantaged members of majority groups.\n2. \"Plus factors\" for being from an underrepresented group benefit advantaged members of that group more than disadvantaged members.\n3. \"Top-percent\" policies that are blind to ethnicity reallocate opportunity to regions with less of it.\n\nAll three of these findings are well-known, and have been part of the debate around these policies for decades. 
The model doesn't provide extra insights into these policies. \n\nWhen the model is deployed on real data, it also doesn't provide insights. It seems as though the admissions data from Berkeley is too censored to study the impacts of social determinants of opportunity, since it doesn't include anything about an applicant's geography beyond whether they are in-state. The regions inferred by the model don’t make a lot of sense given what we know about California’s ethnic geography (e.g., they don't show any signs of the racial segregation induced by California's restrictive housing policies)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "How does modeling University of California admissions data via the presented framework differ experimentally from past methods? What are possible extensions of this framework in developing more holistic admissions systems? A theoretical analysis of what this kind of admissions system could look like could provide further insight into the extensions of this model." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The authors seek to model fairness in college admissions by disentangling variables that implicitly model each other, which provides an interesting framework for considering the intersectionality of factors that influence an individual. When applied to college admissions and academic preparedness, the authors provide a convincing argument for abstracting out social determinants of opportunity and studying the underlying framework and its impact on the individual. Further, the authors demonstrate various applications of their framework in Section 4 in studying historical admissions systems." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work explores incorporating the concept of social determinants of opportunity, variables that relate to an individual's academic success causally and potentially implicitly. The authors deviate from past work by modeling implicit relationships between these variables rather than simplified relationships and further explore adding previously omitted variables. Then, framing academic preparedness as an optimization problem, the authors find a correlation between race and social determinants of opportunity, using GPA as an estimate of academic preparedness and analyzing the University of California's admissions data. As an analysis of existing data, this work proposes modeling protected characteristics and studying the influence of contexts and environments on the individual for fairness analysis."
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "While there are limitations in studying observational data, this paper could have benefitted from a further analysis of the University of California dataset; the authors do acknowledge the limitations of summary statistics but further discussion of the dataset and analysis on other datasets could have provided more support to the experimental section of this paper. The authors briefly discuss their experimental findings but the three separate graphs in Figure 3 could have benefitted from further discussion, particularly in relation to each other and how the correlation between race and social discriminators of opportunity correlates with understanding academic preparedness in a region. Potentially interleaving the methods and providing experimental results for the modeling of past admissions systems could have provided more tractable examples of how this framework compares to prior work." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We approach procedural fairness by explicitly considering influences on individuals from social determinants of opportunity." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024procedural,\ntitle={Procedural Fairness Through Addressing Social Determinants of Opportunity},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4jBJ6JphYM},\nnote={under review}\n}" }, "abstract": { "value": "_Social determinants of opportunity_ are variables that, while not directly pertaining to any specific individual, capture key aspects of contexts and environments that have direct causal influences on certain attributes of an individual, e.g., environmental pollution in an area affects individual's health condition, and educational resources in an neighborhood influence individual's academic preparedness. Previous algorithmic fairness literature often overlooks _social determinants of opportunity_, leading to implications for procedural fairness and structural justice that are incomplete and potentially even inaccurate. We propose a modeling framework that explicitly incorporates _social determinants of opportunity_ and their causal influences on individual-level attributes of interest. To demonstrate theoretical perspectives and practical applicability of our framework, we consider college admissions as a running example. Specifically, for three mainstream admission procedures that have historically been implemented or are still in use today, we distinguish and draw connections between the outcome of admission decision-making and the underlying distribution of academic preparedness in the applicant population. Our findings suggest that mitigation strategies centering solely around protected features may introduce new procedural unfairness when addressing existing discrimination. Considering both individual-level attributes and _social determinants of opportunity_ facilitates a more comprehensive explication of benefits and burdens experienced by individuals from diverse demographic backgrounds as well as contextual environments, which is essential for understanding and achieving procedural fairness effectively and transparently." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." 
}, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Procedural Fairness", "Social Determinants of Opportunity", "Causal Fairness", "Structural Justice" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/79472c172a67c4eff20ad02a9e0d408ee2e35d31.pdf" }, "presentation": null, "primary_area": { "value": "alignment, fairness, safety, privacy, and societal considerations" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/db3b6214174935b544767e67d701f69bb9f08db1.zip" }, "title": { "value": "Procedural Fairness Through Addressing Social Determinants of Opportunity" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4jzjexvjI7
Regret measure in continuous time limit for a stochastic Multi-armed bandit problem
main
Active
Stochastic multi-armed bandit;Risk-sensitive regret;Hamilton-Jacobi-Bellman equation;Continuous time-limit
reinforcement learning
1;3;3
1;4;3
1;2;2
1;2;1
1;1;1
2.333333
2.666667
1.666667
1.333333
1
0.944911
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 1 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "Please refer to the weaknesses." }, "rating": { "value": 1 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "I am unable to provide a comprehensive scientific review of the paper, and thus I cannot identify specific strengths. Please refer to the weaknesses below." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper aims to analyze multi-armed bandit problems using differential equations and introduces a new risk measure for the analysis." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper has significant issues with presentation. Not only are there numerous grammatical errors, typos, and punctuation mistakes, but many sentences are incomplete and seem disconnected from the surrounding context. Additionally, the writing lacks a clear logical flow, making it difficult to follow the argument.\n\nFurthermore, it appears that the authors have not adhered to the official ICLR style guidelines.\n\nDue to these issues, I am unable to provide a more detailed review." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. what is the main contribution of the paper?\n\n2. what is exactly the problem studied?\n\n3. why studying the continuous-time limit is relevant for bandit problems?\n\n4. How should we interpret the main result Theorem 1 and understand its practical relevance?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "Considering continuous-time limit of regret measures in continuous time." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies a class of stochastic multi-armed bandit problems with a risk-sensitive regret measure within a continuous limit setting" }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The presentation is not clear. \n\nThe paper's contribution and the significance of the problem are not clearly articulated in the Introduction and the main text. 
\n\nThe English in the paper could benefit from some further refinement or editing to enhance clarity and coherence." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "See Weaknesses." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The way of converting the MAB problem to a PDE problem is interesting and meaningful. The work compares different concepts, like the frequentist and Bayesian settings, making it easy to understand the applicability of the method." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper considers the traditional multi-armed bandit problem with a new risk measure. The authors take a continuous-time limit through rescaling and use a PDE to find the optimal policy. In addition, the authors use some simulations to verify their results." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The writing needs to be improved. There are a lot of typos, which make it hard to understand the paper. \n\n2. There are no real-world applications provided by the authors regarding why this new risk measure is important, reducing the credibility and impact of the paper.\n\n3. The usage of MDP seems improper. In your setting, $\\nu$ seems to be fixed and only $s$ and $q$ are changing. However, there is no need to learn the transition kernel: if you choose an action $a$, the corresponding $q$ is increased by 1. Then, the problem reduces to learning the reward function, which is the same as in the traditional MAB literature, so people usually don't call it an MDP. It would be more reasonable to use your framework to consider the case where $\\nu$ varies and call that an MDP.\n\n4. The notation is messy. For example, why does $V_{i+1}$ rely only on $R_i$? And you use a very strong assumption but hide it in Lemma 1.\n\n5. Theorem 1 is unclear. What is zero? Why do you use a bracket that links to nothing?\n\n6. In your numerical study, how do you implement UCB and TS? Do you adjust their definitions of regret to your new risk measure? If not, they are not comparable. Otherwise, it would be better to describe in detail how you set the baselines." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024regret,\ntitle={Regret measure in continuous time limit for a stochastic Multi-armed bandit problem},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4jzjexvjI7},\nnote={under review}\n}" }, "abstract": { "value": "We study a class of stochastic multi-armed bandit problems with a risk-sensitive regret measure within a continuous limit setting.
This problem is interesting when optimizing the expected reward is not the foremost objective, and the problem horizon is long. By scaling the state parameters, including the number of pulls and the cumulative reward for each arm, we study the bandit problem with an infinite horizon and delineate such risk using a Hamilton-Jacobi-Bellman equation with quadratic growth. Using this approach, we establish an explicit form of the optimal policy associated with the considered risk. As an application, we present examples where the results obtained in continuous time offer insights into the optimal policy for each case. Finally, numerical experiments confirming the theoretical results are presented." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Stochastic multi-armed bandit", "Risk-sensitive regret", "Hamilton-Jacobi-Bellman equation", "Continuous time-limit" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/cf0559a93e3cce480271d887b4e3a5098d667b29.pdf" }, "presentation": null, "primary_area": { "value": "reinforcement learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Regret measure in continuous time limit for a stochastic Multi-armed bandit problem" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4ktJJBvvUd
Multi-objective antibody design with constrained preference optimization
main
Active
antibody design;diffusion generative model;preference optimization
applications to physical sciences (physics, chemistry, biology, etc.)
5;5;5
3;4;3
2;3;2
3;3;3
2;3;3
5
3.333333
2.333333
3
2.666667
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. In the visualization part, I don't see why results come from dyMEAN and DiffAb do not satisfy constraints like Stability, Self-association. Can you explain this in detail?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. Multiple objects are considered to improve the quality of generated antibodies. Although not validated in wet lab, these kinds of properties are essential.\n2. This work does not simply integrate DPO only optimizing binding affinity, which broadens the horizons for similar works." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper focuses on some important properties, such as non-antigen binding specificity and low self-association, and optimizes the model in a DPO-like manner. What differs it from other DPO-based methods lies in two forms, the optimization targets and continuous rewards. With a two stages training framework, the proposed AbNovo is capable of capturing generalized protein information and constraining the generated results with desired properties. Experiments also support the effectiveness that generated antibodies are well designed." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Rosetta energy is used as an alignment metric. It is well-known that forcefield energies have a weak correlation with measured binding affinity, typically around 0.3 [1,2]. This may lead to the totally wrong direction.\n2. Limited antibody optimization experiments, which should be a major highlight of antibody design. Maybe some further experiment may alleviate this, like in [3,4].\n\n[1]Luo S, Su Y, Wu Z, et al. Rotamer density estimator is an unsupervised learner of the effect of mutations on protein-protein interaction[J]. bioRxiv, 2023: 2023.02. 28.530137.\n\n[2]Ambrosetti, F., Piallini, G., & Zhou, C. Evaluating Forcefield Energies in Protein Binding Studies. National Center for Biotechnology Information, 2020.\n\n[3]Kong X, Huang W, Liu Y. End-to-end full-atom antibody design[J]. arXiv preprint arXiv:2302.00203, 2023.\n\n[4]Shitong Luo, Yufeng Su, Xingang Peng, Sheng Wang, Jian Peng, and Jianzhu Ma. Antigen-specific\nantibody design and optimization with diffusion-based generative models for protein structures.\nAdvances in Neural Information Processing Systems, 35:9754–9767, 2022." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. The announcement of \"The first deep generative model for multi-objective antibody design\" in summarized contributions, AbDPO also supports multi-objective optimization.\n\n2. In energy evaluation, if you want to assess the energy performance of the designed backbone, energy minimization is necessary for the side chains while keeping the backbone structure unchanged, and then calculate the energy. If you wish to evaluate the antibody's performance in real experiments (which implies the CDR region's structure might not maintain the designed configuration), you can use multi-chain supporting folding models like AlphaFold3 to predict the binding structure. When calculating energy, does the relaxation you used optimize only the side chain conformations, or does it also alter the main chain structure? If it's the latter, are these experiments intended to demonstrate that AbNovo can generate a better initial structure for Rosetta relaxation?\n\n3. Does the optimization of these physical properties contribute to some chemical validity? For example, does the peptide bond length get closer to the actual length?\n\n4. The standard deviation of the physical energy needs to be presented.\n\n5. The AAR performance is excessively high, and it's necessary to check whether the training data of the protein language model contains samples similar to the test set.\n\n6. I am curious about how many amino acids have mutated in those designed antibodies that outperform natural ones (at least in binding energy).\n\n7. The task setting of dyMEAN is different from others, including AbNovo. dyMEAN does not provide the real FR structure, making direct comparison somewhat unfair. Additionally, how is it achieved to use dyMEAN to generate 128 antibodies for an antigen?\n\n8. Calculating RMSD on the aligned structures seems somewhat unreasonable. Typically, for two rigid bodies that can freely undergo SE(3) transformations, alignment is performed first, followed by RMSD calculation. However, in the setting of this paper, the FR region is given, meaning the CDR region cannot undergo SE(3) transformations independently, thus requiring a direct RMSD calculation." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Achieved performance on physical metrics that significantly surpasses other methods.\n\nIntroduced a structure-aware protein language model and demonstrated its usefulness for antibody design.\n\nProvided rigorous theoretical derivation" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents an antibody design method, AbNovo, achieved antibody through multi-objective optimization. 
By introducing a structure-aware protein language model and employing constrained preference optimization with continuous rewards, AbNovo surpasses previous methods in both reference-based metrics and reference-free metrics (i.e., biophysical properties)." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Seems to be an updated version of AbDPO, somewhat heavier but showing better performance.\n\nThe task setting is overly simplistic. Although the structure of the antibody's FR region is relatively conserved and can be considered known, the binding pose between the antibody and antigen is typically unknown. However, given that the main goal of this work is to propose a new method for antibody optimization, this limitation is understandable." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "•\tin Section 4.2 you state that when “we incorporated all constraints into the optimization objective by taking a weighted average” a “drop in performance” is observable. However, the corresponding results show an improvement wrt. the “All Constraints” metric. Could you elaborate on that? \n \n•\tIn Table 2, we can observe that AbNovo (base) sometimes exhibits favorable scores than AbNovo. Is there a tradeoff between fulfilling constraints and achieved AAR/RMSD? \n \n•\tIs there a reason dyMEAN is not included in Figure 4 and AbX not in Figure 2 respectively?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "•\tThe authors provide many theoretical derivations and analysis. \n \n•\tThe authors include many baselines for their experiments which shows the good performance of their proposed method." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this manuscript, the authors present AbNovo, a method combining constrained preference optimization with generative models for multi-objective antibody design. First, an antigen-conditioned generative model is trained to co-design antibody structure and sequence. Then this model is fine-tuned to maximize binding affinity to a target antigen while enforcing constraints on properties such as non-specific Binding, Self-association, and Stability. In their experiments, the authors compare their method to many recent works and show an improved performance." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "•\tI would suggest introducing a background section, as there are many things in this manuscript that would benefit from a proper introduction.\n \n \t∘\tbase model, reference model, policy model could be introduced, e.g. with an intuition. These are introduced in Figure 1, but do not come with a description on how they are related. 
Only in Algorithm 1, the reader is shown that those are updated iterations of the very same model.\n\n \t∘\tdelta and G in Equation 1 are never introduced, but instead taken from Campbell et al.\n\n \t∘\tCTMC - Continuous Time Markov Chain is never defined.\n\n \t∘\tThe notion of time t in diffusion processes used in the manuscript, t in U([0, 1]) is based on the CTMC definition by Campbell et al. but differs from that used in many other publications, e.g. [1] J. Ho, A. Jain, and P. Abbeel, “Denoising Diffusion Probabilistic Models”, [2] J. Sohl-Dickstein, E. Weiss, N. Maheswaranathan, and S. Ganguli, “Deep Unsupervised Learning using Nonequilibrium Thermodynamics”. Thus, I would recommend introducing it e.g. as being in [0, 1] in line 185. Instead, this is first done in Equation 9.\n\n \t∘\tT^(0:1) as a diffusion path is first defined in line 284 even though it is used many times before.\n\n\n•\tThe evaluation metrics remain unclear even after reading Appendix A.3. This holds especially for “Evolutionary Plausibility”, “Stability”, “Self-association”, “Non-specific Binding”.\n \n•\tWhile the manuscript goes into great theoretical detail, intuition is often lacking. E.g., Equation 3 is introduced, but an intuition, “the first term maximizes rewards, while the second term keeps the model close to the reference model”, which could facilitate understanding for the reader, is missing. \n \n•\tMany things necessary for fully understanding the paper are moved to the appendix, resulting in decreased readability. Further, this also applies to some of the most interesting results, e.g. Table 9 and especially Figure 4. \n \n•\tSome tables are hard to read, as their caption and corresponding text do not exactly describe what is in the table. E.g. \n \n \t∘\tIt is unclear what “reference” in Table 1 describes.\n \t∘\tIn Table 3, the reader must guess that “ESM-2 based” refers to “utilizing different language models” from the text and “Multi-objective” refers to “we incorporated all constraints into the optimization objective”.\n\n•\tIn the abstract and introduction, a focus is put on “alleviate overfitting issues due to the scarcity of antibody-antigen training data”, but no analysis supporting such a claim is included. \n \n•\tThe analysis of the “impact of utilizing different language models in training the antibody design model” is very short and not well described. \n \n•\tFigure 4 is a very interesting figure which summarizes the capabilities of DiffAb, AbX, and AbNovo very well and highlights that AbNovo “performs best”. In there, we also observe that only a single antibody generated by DiffAb against 5NUZ does violate constraints. Therefore, it seems inadequate that the visualized antibody for DiffAb in Figure 2 is a sample which does not fulfill all constraints. Furthermore, the DiffAb sample with “Rosetta binding energy: -2.12, Evolutionary Plausibility: 2.60” violating constraints cannot be found in Figure 4. \n \n•\tSome claims appear exaggerated:\n \n \t∘\t“the first deep generative model for multi-objective antibody design, which explicitly optimizes multiple biophysical properties crucial for real-world antibody development.” There have been previous works which analyze the multi-objective setting for generating antibodies, e.g. “Pareto Front Training For Multi-Objective Symbolic Optimization” by Faris et al., which trains an algorithm to optimize a Pareto front of sequences regarding the objectives of antibody binding quality, stability, and humanness.
Perhaps the claim can be weakened or reformulated?\n\n \t∘\tAbNovo is “bridging the gap between in silico design and practical application.” seems a bit too strong given that no practical application is contained.\n\n•\tTypo “Bolocks” in Figure 3 \n \nIn summary, I think this manuscript offers valuable new ideas but suffers from not being self-contained, sub-optimal readabilities and depth of analysis. I hope these issues can be addressed in the rebuttal and would love to increase my score in response." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024multiobjective,\ntitle={Multi-objective antibody design with constrained preference optimization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4ktJJBvvUd},\nnote={under review}\n}" }, "abstract": { "value": "Antibody design is crucial for developing therapies against diseases such as cancer and viral infections. Recent deep generative models have significantly advanced computational antibody design, particularly in enhancing binding affinity to target antigens. However, beyond binding affinity, antibodies should exhibit other favorable biophysical properties such as non-antigen binding specificity and low self-association, which are important for antibody developability and clinical safety. To address this challenge, we propose AbNovo, a framework that leverages constrained preference optimization for multi-objective antibody design. First, we pre-train an antigen-conditioned generative model for antibody structure and sequence co-design. Then, we fine-tune the model using binding affinity as a reward while enforcing explicit constraints on other biophysical properties. Specifically, we model the physical binding energy with continuous rewards rather than pairwise preferences and explore a primal-and-dual approach for constrained optimization. Additionally, we incorporate a structure-aware protein language model to mitigate the issue of limited training data. Evaluated on independent test sets, AbNovo outperforms existing methods in metrics of binding affinity such as Rosetta binding energy and evolutionary plausibility, as well as in metrics for other biophysical properties like stability and specificity." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "antibody design", "diffusion generative model", "preference optimization" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/eb588a27ad8adb7dbc51478b0a2360d62e1c53f1.pdf" }, "presentation": null, "primary_area": { "value": "applications to physical sciences (physics, chemistry, biology, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Multi-objective antibody design with constrained preference optimization" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4l3AH8Bhmt
Revealing and Mitigating Over-Attention in Knowledge Editing
main
Active
model editing;mechanistic interpretability;NLP;language models
interpretability and explainable AI
5;5;6;8
4;4;4;4
3;3;3;4
2;2;3;3
3;3;3;4
6
4
3.25
2.5
3.25
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "•\tIt is mentioned that there are specificity errors for models of all types. Have parameter preserving or meta-learning methods also been investigated? It might be interesting to know the RS/RM and DNS/DNM scores for methods like GRACE or ICE.\n•\tI would suggest adding at least the scores for MEMIT and PMET to Table 1" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "The paper impresses with its consistently comprehensible and stringent argumentation. The authors start with a problem of a current methodology, prove that this problem exists, identify the underlying significant cause and can thus propose a solution method for the problem. The paper is comprehensibly written and error-free throughout, the illustrations and tables are helpful and well chosen. An additional plus is the ablation study, which deals with the trade-off between editing success and specificity." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The use of LLMs in real-world scenarios and applications creates the need for procedures to correct and update the knowledge in these models. The aim here is to change the model's knowledge without costly retraining in order to prevent hallucinations or correct obsolete facts without diminishing the model's performance. \nRecently, the research field of knowledge editing has emerged, in which various techniques such as fine tuning, in-context editing, memory-based and locate-then-edit methods have already been proposed. The disadvantage of these methods is that they can negatively influence the model, especially if information of the edited knowledge triple or related content appears in the context. The study in this paper has set itself the task of shedding more light on this phenomenon, investigating its cause and proposing a method to prevent or mitigate this overcompensation of the edited model. In order to investigate the deteriorating specificity performance of an edited model, the authors develop two metrics and show that even a single updated fact can lead to a so-called specificity error.\nAn examination of these errors leads to the realization that they are mainly caused by attention activations, the attention module places too much focus on the edited information (attention drift) and ultimately predicts an incorrect token. Consequently, the authors propose selective attention drift restriction (SADR) as a method to mitigate this false focus." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "A look at the appendix shows that the experiments for this article were much more extensive than stated in the actual paper. 
in addition to further details and results of the experiments described, further results for additional editing methods (WISE, MEND) and additional data sets can be found here. A human evaluation is also attached. It is a pity that even the section on limitations and future work did not find space in the main text. A minor weakness of the paper could be that it is not made clearer why the experiments are limited to locate-then-edit methods, although it is emphasized that the specificity error also occurs with meta-learning and parameter-preserving methods.\nTypo line 47: Paris" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See weakness." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The specificity is an important problem of the knowledge editing and the proposed method can effectively alleviate this problem.\n2. The authors consider the specificity problem comprehensively and conduct a thorough evaluation of SADR against existing methods and models, providing a comprehensive analysis of its performance." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This work focuses on addressing the issue of over-attention during knowledge editing in large language models (LLMs). Knowledge editing techniques were developed to correct LLM's error by precisely modifying a small portion of the model's parameters. However, these methods can lead to Specificity Failure, where the model's existing knowledge and capabilities degrade post-editing. From the analysis in the paper, this phenomenon is attributed to Attention Drift, where attention heads excessively focus on edited entities. The authors propose Selective Attention Drift Restriction (SADR), which adds a regularization term to prevent undue shifts in attention during the editing process. Experiments show that SADR effectively mitigates Specificity Failure while maintaining or improving performance metrics like fluency and reliability across multiple LLMs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. From the experiment results, the proposed method leads to a performance drop in the generalization, which is actually an important metric in knowledge editing. In my view, this drop may be caused by the attention-learning method as it would make the model focus less on the subject in other contexts. This drawback would deteriorate the contribution of the method.\n2. Although the proposed method demonstrates good performance under the specificity metric, I'm not that convinced by the analysis and conclusion of the reason via the attention head. The attention head may be one reason it focuses more on the subject. 
However, as the editing is conducted at the MLP in some methods, it may also be the editing vector that influences the specificity.\nThis can be seen from recent work showing that the edit vector's direction [1,2], space [1], and norm [2,3] influence specificity. For example, if we constrain the updated W, the information flow may not be dominated by huge logits. \nSome of these are contemporary works, so I don't require the experimental results, but a proper analysis would encourage me to raise my score. \n3. Regarding the decoding constraints, can you provide a comparison between the attention-based and decoding-based constraint [4] methods here?\n\n[1] AlphaEdit: Null-Space Constrained Knowledge Editing for Language Models\n\n[2] Knowledge Circuits in Pretrained Transformers\n\n[3] Perturbation-Restrained Sequential Model Editing\n\n[4] Decoding by Contrasting Knowledge: Enhancing LLMs’ Confidence on Edited Facts." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "**Main Questions**\n* *Q1*: It would be better if the authors could point out the reasons that lead to attention drift. One possible reference could be: after editing, the norm of the model parameters $\hat{W}$ increases, causing the norm of the hidden layer vector $v^*$ to grow. This leads to enhanced attention of the last token towards the edited subject.\n\n* *Q2*: Compared to conventional editing methods, how much additional time overhead does SADR incur? I noticed that SADR computes the attention weights for each layer before editing.\n\n**Minor Questions**\n* *Q3*: I notice that $\mathcal{L}_{SADR}$ traverses all layers $l$ in Equation (2). So my question is: is it possible to achieve the same result by restricting the attention weights of only one or a few layers?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper is well-motivated: it explores the reasons behind the Specificity Failure observed in edited models, and proposes an effective solution to address this issue.\n* SADR is generalizable: by incorporating an additional loss function, SADR can be applied to various knowledge editing techniques.\n* The article is well-structured: it first identifies specificity failure through guided experiments and then delves into the causes of specificity failure. Finally, the paper proposes a solution.\n* The ablation study proves the effectiveness of the method." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors find that existing knowledge editing methods tend to place excessive attention on the knowledge that has already been edited. This leads to failures in the model's answers when the edited subject appears in context (Specificity Failure).
This article takes the first step towards alleviating specificity failure, which consists of two parts: 1) Investigating the reason for specificity failure; 2) Proposing a new loss function. In the first part, the author first finds that the last token of the edited subject leads to attention drift and then proposes a preliminary solution to alleviate specificity failure. Based on the above findings, this paper proposes a new method (SADR) in the second part, which effectively mitigate the specificity failure." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "**Main Weaknesses**\n* *W1*: I suggest conducting additional experiments on Mquake [1] to prove the effectiveness of the method. Recent research [1] has shown that existing knowledge editing methods are not good at multi-hop editing. For example, when we edit a piece of knowledge from *<CountryX, Prime_Minister, PersonY>* to *<CountryX, Prime_Minister, PersonZ>*, the corresponding knowledge *<CountryX, First_Lady, PersonY's wife>* should also be changed to *<CountryX, First_Lady, PersonZ's wife>*. Based on the paper's findings, the failure of multi-hop questions is because the edited model's over-attention on the subject CountryX. So I'm curious about whether SADR can effectively solve the above-mentioned problems. \n\n**Minor Weaknesses**\n* *W2*: I notice that in Line 165, the editing target is represented as $o^*$, while in other places it is represented as $o_{edit}$. Perhaps changing all occurrences of $^*$ to $_{edit}$ can improve the readability of the article.\n\n* *W3*: In Table 2 *Relation*, Equation 3 seems to have extra 'xs'. \n\n**Missing References**\n* Knowledge Editing for Large Language Models: A Survey. (2023)\n* A Survey on Knowledge Editing of Neural Networks. (2023)\n\n$Ref$:\n\n[1] Mquake: Assessing knowledge editing in language models via multi-hop questions. (2023)" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "See Weaknesses." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper addresses a critical issue in knowledge editing for LLMs, focusing on the problem of specificity failure, which is essential for ensuring model stability after modifications. The proposed SADR method offers a novel extension to existing techniques by dynamically constraining attention heads to prevent over-attention on edited entities, effectively improving specificity. The method is thoroughly evaluated across multiple models and tasks, showing significant improvements in mitigating attention drift while maintaining high edit success rates. Additionally, SADR is versatile and adaptable to various knowledge editing approaches and model architectures, enhancing its applicability in diverse editing scenarios." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes the Selective Attention Drift Restriction (SADR) method to address the issue of specificity failure in knowledge editing for LLMs. This failure occurs when models, after being edited to modify specific factual knowledge, disproportionately focus on the edited entity, leading to incorrect outputs in related contexts. SADR introduces a regularization term during knowledge editing to restrict excessive attention on the edited knowledge. The method is evaluated on five language models and shows improvements in mitigating specificity failures without significantly affecting edit success rates." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The methods section is overly concise, i.e., Section 4 does not provide a thorough explanation of SADR. For example, why is KL divergence used to constrain the two attention weights in Eq. 2? Is there a theoretical basis or any prior work that can be referenced?\n\n2. While the SADR method shows significant improvements on the Relation and Distract Neighborhood tasks, the performance drop on generalization metrics suggests that the method struggles to balance specificity and generalization. Table 4 shows a general decline in generalization, especially for PM, which dropped by as much as 20 points. Can sacrificing generalization to improve specificity really be considered effectiveness?\n\n3. In Table 6, the max difference with or without head selection is less than 1.5 points (some difference is less than 0.5 points). Could this be due to random fluctuations? Could you provide a significance testing to demonstrate the effectiveness of head selection? Additionally, what would the performance be if a head were selected at random?\n\n4. There is a lack of efficiency analysis. Does using SADR increase computational load, memory usage, or runtime?" }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We analyze the reasons behind specificity failure in knowledge editing and mitigate it with our method." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024revealing,\ntitle={Revealing and Mitigating Over-Attention in Knowledge Editing},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4l3AH8Bhmt},\nnote={under review}\n}" }, "abstract": { "value": "Large Language Models~(LLMs) have demonstrated superior performance across a wide range of tasks, but they still exhibit undesirable errors due to incorrect knowledge learned from the training data. To avoid this, knowledge editing methods emerged to precisely edit the specific model knowledge via efficiently modifying a very small percentage of parameters. 
However, those methods can lead to the problem of **Specificity Failure**, where the existing knowledge and capabilities are severely degraded due to editing.\nOur preliminary indicates that Specificity Failure primarily stems from the model's attention heads assigning excessive attention scores to entities related to the edited knowledge, thereby unduly focusing on specific snippets within the context, which we denote as the **Attention Drift** phenomenon.\nTo mitigate such Attention Drift issue, we introduce a simple yet effective method **S**elective **A**ttention **D**rift **R**estriction(**SADR**), which introduces an additional regularization term during the knowledge editing process to restrict changes in the attention weight distribution, thereby preventing undue focus on the edited entity.\nExperiments on five frequently-used strong LLMs demonstrate the effectiveness of our method, where SADR can significantly mitigate Specificity Failure in the predominant knowledge editing tasks." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "model editing", "mechanistic interpretability", "NLP", "language models" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/663c7e55f482138b900a07812bd44610ee79e278.pdf" }, "presentation": null, "primary_area": { "value": "interpretability and explainable AI" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/dfe1640e986ee97c135fc1a06d1c8109324c8747.zip" }, "title": { "value": "Revealing and Mitigating Over-Attention in Knowledge Editing" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
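The reviews of this record repeatedly probe how the KL-divergence constraint on attention weights (Eq. 2 of the submission) is supposed to behave and what head selection contributes. The submission's exact formulation is not reproduced here, so the snippet below is only a minimal sketch of one plausible form of such a regularizer, assuming access to per-head attention maps recorded before and after the edit; the function name, the selected head indices, and the weight `lam` are illustrative placeholders, not the authors' implementation.

```python
import torch

def attention_drift_penalty(attn_pre, attn_post, selected_heads):
    """Sum of KL(post || pre) over a chosen subset of attention heads.

    attn_pre, attn_post: (num_heads, query_len, key_len) attention weights
    recorded before and after the knowledge edit (each row sums to 1).
    selected_heads: indices of the heads whose drift is restricted.
    """
    penalty = torch.zeros(())
    for h in selected_heads:
        p = attn_post[h].clamp_min(1e-12)  # edited model's attention
        q = attn_pre[h].clamp_min(1e-12)   # pre-edit reference attention
        # KL divergence per query position, averaged over positions.
        penalty = penalty + (p * (p / q).log()).sum(dim=-1).mean()
    return penalty

# Toy usage: 2 heads, 4 query positions, 6 key positions.
pre = torch.softmax(torch.randn(2, 4, 6), dim=-1)
post = torch.softmax(torch.randn(2, 4, 6), dim=-1)
edit_loss = torch.zeros(())          # stand-in for the usual editing objective
lam = 0.1                            # illustrative regularization weight
total_loss = edit_loss + lam * attention_drift_penalty(pre, post, selected_heads=[0, 1])
```

Under this reading, the head-selection question raised in the reviews (Table 6) amounts to asking how sensitive the summed penalty, and hence the edited model, is to the choice of `selected_heads`.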
4ltiMYgJo9
A closed-loop EEG-based visual stimulation framework from controllable generation
main
Active
Neural modulation; EEG; Close-loop;
applications to neuroscience & cognitive science
3;3;5;8
4;2;4;3
3;2;3;4
2;2;3;4
1;1;2;3
4.75
3.25
3
2.75
1.75
0.036835
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "In Sec 3.2, what do the actions and states of the MDP refer to in this context? Are the actions features, because the algorithm is selecting features of the neural activity to represent? Or are the actions the selected images to be used as visual stimuli? \n\nWhat is the motivation for not updating the gradients in the model? The abstract says this allows \"us to directly analyze the relationship between the administered visual stimuli and the targeted brain activity\", but I wasn't sure why this is the case or where in the paper this motivation is fully explained or justified.\n\nIn Figure 1, what is the difference between \"selection\" and \"action\"?\nIn Fig 2, the distance metric seems to be applied to images, but I thought the point was to compare induced and target neural activities." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The problem that this paper takes on is very interesting. I am aware of previous research that has attempted to find preferred visual stimuli for single neurons, so as to figure out what that neuron \"prefers\", but this paper seems to be taking on a related but quite different issue, which is: given a whole pattern of population activity, what stimulus would elicit that overall pattern? This seems like a project that may have useful clinical applications in the future, as well as being scientifically interesting in its own right." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper develops a method for choosing the optimal image stimulus to present to a human subject to elicit a specific desired pattern of neural activity (as measured using EEG)." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I found the paper hard to follow. I admit that a contributing factor here may be my own lack of experience with respect to some of the techniques the paper uses, such as EEG data, diffusion models, and genetic algorithms. However, I do think that the presentation of the paper could be much clearer, and I will list some examples below of specific issues that came up with respect to clarity. \n\n- Most of the figures I did not understand, and as far as I could tell, the figures aren't referred to in the main text, so it was difficult to situate what the purpose of each figure was in the overall narrative of the paper. \n- It is unclear what the purpose of the MDP is in Section 3.2 (see Questions below).\n\nIt would probably have been useful to include a Supplemental section to explain some of the methods in more detail." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 4 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "The authors state that the identified stimulus is \"optimal.\" Based on the MDP formulation of the algorithm, I understand that it finds a local minimum. Could you clarify how this approach ensures finding a global optimum, rather than a local one?\n\nWhy did you limit the comparison to the first 250 ms (Figure 4D)? While the initial 250 ms may indeed capture critical visual information, it is common in EEG analysis to display the full 1000 ms post-stimulus data. Could you elaborate on this choice?" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "Very interesting study, timely, solves an important question, is generalizable." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This is a highly innovative study demonstrating the capability to identify visual stimuli that closely match the original stimuli eliciting specific EEG activity patterns. The algorithm is well-explained and, to my knowledge, represents one of the first successful applications of this approach with EEG data." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I can hardly identify any significant limitations in the current study. However, I have two questions:\n\nThe authors state that the identified stimulus is \"optimal.\" Based on the MDP formulation of the algorithm, I understand that it finds a local minimum. Could you clarify how this approach ensures finding a global optimum, rather than a local one?\n\nWhy did you limit the comparison to the first 250 ms (Figure 4D)? While the initial 250 ms may indeed capture critical visual information, it is common in EEG analysis to display the full 1000 ms post-stimulus data. Could you elaborate on this choice?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. The limitations of past studies on closed-loop neural encoding/decoding were not adequately justified, weakening the contribution of the study.\n2. The subtitles are not well match the items in the framework, making the manuscript is not easy to follow.\n3. The encoding model has not been adequately validated. 
This module is critical to the proposed framework. In addition, the implementation details of the encoding model are not clear, e.g., was the model trained using individual data or data from multiple subjects? How many training samples are used to train the encoding model? How is the model validated?\n4. Is the EEG encoder that has been aligned with CLIP image features a good choice? This alignment may introduce bias in the feature representation of the target and generated EEG signals. Why not a naive EEG encoder? \n5. None of the figures and tables are referenced in the main text, which makes the figures quite difficult to read. For example, what is encoded by the dot size in Figure 3c? What is the image with the red boundary in Fig. 3d, step 10?\n6. Are there any failure cases? What I can imagine includes: 1) the random samples in the first roulette-wheel round fail to cover the target; 2) the generated images at a certain iteration fail to cover the target. The authors are encouraged to discuss this issue. \n7. Regarding the statement “Since different stimulus images in our framework can produce the same or similar EEG features”: this could be attributed to the existence of metamers. However, other factors cannot be overlooked: 1) the limitation of EEG (low spatial resolution) in quantifying brain activity, since different stimulus images might evoke similar EEG responses simply because of the limitations of EEG; 2) the limitation of the model used for EEG feature prediction (the encoding model, Section 3.1). The authors are encouraged to justify this point more carefully.\n\nOther issues:\n“quotation marks” are not in the right format.\nThe font size of some text in the figures is too small to read.\nTypos: in the Figure 4 caption, is (F) for the O2 channel?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The proposed closed-loop framework for synthetic visual stimulus generation is novel in several ways: the retrieval strategy for identifying candidate images, the feature selection approach, and the method for addressing the problem of an unknown target query image. The framework and related methodologies are generally well designed and presented." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors propose a closed-loop stimulation framework for EEG-based visual encoding, aiming to generate visual stimuli that elicit specific neural activities through a controllable image generation strategy. In this framework, the authors control the stimulus image generation by iteratively steering the brain activity evoked by the visual stimulation towards the desired neural response corresponding to candidate images rated by human users. Controlling visual stimuli in visual encoding studies is very important. Meanwhile, the stimulus images in most prior studies are relatively arbitrary, as there are no standard criteria. The proposed framework provides a possible solution to this problem." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The weaknesses of the manuscript lie in the lack of details and validation. For example, the details of the encoding model are not sufficient. The authors describe the architecture of the encoding model; however, the details for training such an encoding model are missing.
The authors should provide details about the training procedure, including data sources. Was the encoding model trained using data from multiple subjects, or was it subject-specific? What method was used to validate the encoding model? More importantly, the encoding model was not adequately validated (at least I did not see any results related to the encoding model) given its critical role in the framework. In addition, what are the criteria, and how do the authors validate, that the synthetic images are the “optimal” subset that can evoke specific neural activities? Similar issues exist in other modules, e.g., feature selection and interactive search. The authors are encouraged to validate each module separately rather than only integratively." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. What kind of neural activity are you concerned with in your experiment? How will you verify whether that activity is properly stimulated by your visual stimuli?\n2. If the answer to the previous question is via the EEG encoder, then how can the encoder capture the neural activity of concern? How does the encoder perform? How will the selection of the encoder influence the result?\n3. What is the reward in the MDP?\n4. For Figure 3.B, why do you choose subject 8 for demonstration? The confidence interval seems large, and I wonder whether the similarity increase passes a significance test.\n5. How should the spectrograms in Figure 4.C be interpreted? I cannot see any clear differences or trends in the figure. \n6. How is Figure 4.D obtained? Why does the \"random\" baseline also look so good?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. The whole framework is novel and interesting. It addresses the challenge of finding stimuli that can evoke a specific brain signal pattern. The framework may have the potential to be applied to more realistic scenarios. \n2. The paper proposes two different settings for finding the visual stimuli, retrieval and generation, and provides corresponding solutions for them. \n3. The overall findings may provide interesting neuroscience intuitions and may ignite further contributions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper devises a closed-loop framework to find visual stimuli that can elicit specific neural activities. The authors model the whole process as an MDP and propose to use interactive search (mind matching) and heuristic search (a genetic algorithm) to solve the problem. While the framework is claimed to be general, the authors instantiate it by training an EEG encoding model to generate synthesized EEG responses and testing it offline on the THINGS-EEG2 dataset.
Visualized results demonstrate the ability of the framework to find appropriate visual stimuli in the search space. The authors also discuss its possible impact and insights." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. One of the main claims by the authors is the adaptability of the whole closed-loop framework. While the authors claim it can simply be replaced by recording EEG data from human participants, there is no concrete demonstration of how. For example, what is the \"specific neural activity in the brain\" in this paper and in a possible real scenario? What's the difference? And how difficult is it, and how much effort will it take, to apply the framework to the real world? It is always easy to claim a methodology is \"generalizable\", but without more justification that does not actually strengthen the contribution of the paper.\n2. Based on point 1, I feel the paper does not sufficiently demonstrate what role the EEG plays in the whole framework. As far as I can understand from the current paper, it seems to be related to the reward $R$ in the MDP design, because it should provide a signal based on the desired neural activities. However, we know neither how the reward is exactly calculated nor what kind of neural signal the authors care about (e.g., a specific frequency band? a specific waveform shape? a specific activation from some brain area?). \n3. Beyond the methodology, it is also not clear from the experiments how the different parts of this framework perform and contribute to the final result. While the results section shows that the framework can yield promising visual stimuli, it lacks both quantitative comparisons between algorithm choices and more detailed explanations of the presented results. (See questions.) Therefore, it is unclear to me how the whole framework and its individual parts perform compared to other solutions.\n4. Overall, the presentation of this paper is unsatisfying (and that is probably why I have the concerns in 2 and 3). On the one hand, the authors present well-known details in the main content but do not make their own claims clear. For example, Algorithm 1 and Algorithm 2 are direct adaptations from previous work; instead of using space to present them, I would like to see more on how the MDP is constructed. On the other hand, mixing citations with sentences (please use \\citep instead of \\cite) and a few typos (in line 222, Algorithm 1, the bracket is not matched) give me the feeling that the paper is not yet ready to be published." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024a,\ntitle={A closed-loop {EEG}-based visual stimulation framework from controllable generation},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4ltiMYgJo9},\nnote={under review}\n}" }, "abstract": { "value": "Recent advancements in artificial neural networks (ANNs) have significantly refined methodologies for predicting the neural coding activities of the ventral visual stream in human and animal brains based on visual stimuli.
Nevertheless, the endeavor to control visual stimuli to elicit specific neural activities continues to confront substantial challenges, including prohibitive experimental costs, the high-dimensional nature of stimuli, pronounced inter-individual variability, and an incomplete understanding of neuronal selectivity. To address these impediments, we propose a novel electroencephalography (EEG)-based closed-loop framework for visual stimulus. Leveraging this framework, we can identify the optimal natural image stimulus within a theoretically infinite search space to maximize the elicitation of neural activities that most closely align with desired brain states. Our framework employs advanced ANN ensemble models to ensure the reliability of neural activity predictions. Furthermore, we conceptualize the brain coding predicted by the ANN model as a non-differentiable black-box process, allowing us to directly analyze the relationship between the administered visual stimuli and the targeted brain activity. Our research demonstrates that, independent of the exactness of the ANN-predicted brain coding, the proposed framework can procure the theoretically optimal natural image stimulus at given cycle steps. Moreover, our method exhibits generalizability across different modalities of brain-specific activity regulation. Our code is available at https://anonymous.4open.science/status/closed-loop-F2E9." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Neural modulation; EEG; Close-loop;" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/08ce57cf674802154179e66199c7fb3058fb0326.pdf" }, "presentation": null, "primary_area": { "value": "applications to neuroscience & cognitive science" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "A closed-loop EEG-based visual stimulation framework from controllable generation" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
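Several reviewers of this record ask what the MDP's states, actions, and reward are, and how the roulette-wheel/genetic search interacts with the black-box EEG encoding model. Those details are not spelled out in the record itself, so the following is only a schematic sketch of a closed-loop search of this general kind: random latent vectors stand in for diffusion-generated images, a fixed random linear map stands in for the ANN ensemble that predicts EEG features, and the fitness (reward) is cosine similarity between predicted and target features. All names, dimensions, and hyperparameters are assumptions for illustration, not the authors' pipeline.

```python
import numpy as np

rng = np.random.default_rng(0)

def surrogate_eeg_features(latent, W):
    """Placeholder for the ANN ensemble that predicts EEG features from an image latent."""
    return np.tanh(W @ latent)

def fitness(latent, target_feat, W):
    """Cosine similarity between predicted and target EEG features (the 'reward')."""
    pred = surrogate_eeg_features(latent, W)
    denom = np.linalg.norm(pred) * np.linalg.norm(target_feat) + 1e-12
    return float(pred @ target_feat / denom)

def closed_loop_search(target_feat, W, dim=64, pop=32, steps=20, sigma=0.2):
    """Roulette-wheel selection plus Gaussian mutation over candidate image latents."""
    population = rng.normal(size=(pop, dim))
    for _ in range(steps):
        scores = np.array([fitness(z, target_feat, W) for z in population])
        probs = np.exp(scores) / np.exp(scores).sum()          # selection probabilities
        parents = population[rng.choice(pop, size=pop, p=probs)]
        population = parents + sigma * rng.normal(size=parents.shape)  # mutation
    return max(population, key=lambda z: fitness(z, target_feat, W))

dim = 64
W = rng.normal(size=(16, dim))
target = surrogate_eeg_features(rng.normal(size=dim), W)   # "desired" neural response
best = closed_loop_search(target, W, dim=dim)
print("best similarity:", round(fitness(best, target, W), 3))
```

Replacing the surrogate predictor with recorded EEG responses would turn the same loop into the online setting the reviewers ask about; the parts the reviews flag as unclear (what counts as the target activity, and how the reward is computed from it) are exactly the parts this sketch leaves as placeholders.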
4mFEb3JvMc
A case for data valuation transparency via DValCards
main
Active
data valuation;fair compensation;transparency;fairness;bias
datasets and benchmarks
3;3;5;6
4;4;3;4
2;2;4;4
2;2;3;3
2;2;3;4
4.25
3.75
3
2.5
2.75
-0.333333
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See above." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The paper is easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents an empirical study of existing data valuation methods in terms of their sensitivity to pre-processing, the consequences of using them for data selection, and their tendency to undervalue minorities." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The paper's technical contribution is a bit limited, mainly focusing on evaluating existing methods.\n- The findings from the paper are not novel. (1) Regarding the sensitivity to data imputation methods: data valuation fundamentally determines the contribution of a given data point based on the other data used together for training; hence, it is straightforward to see that the value of a data point would change depending on the choice of the imputation method, because different imputation methods change the composition of the other data points. (2) Regarding the class imbalance: it is also natural that directly using data values to remove data would lead to class imbalance. This is because data valuation by design assigns the same score to identical data points. As a result, one would either remove two identical data points at the same time or keep them both, which in turn leads to a loss of balance in class representation. In fact, there is existing work that theoretically characterizes the limitation of using data valuation for data selection: https://arxiv.org/abs/2405.03875 (3) Regarding the last finding about undervaluing minorities: the validity of this finding depends on the choice of validation data. If the validation set comprises data points drawn entirely from the underrepresented group, then the value of that group would be high, instead of low as reported by the paper." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "None."
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "The paper is well-motivated and conveys an essential message: existing data valuation methods, primarily designed for machine learning, may be unsuitable for data compensation in data markets. It highlights various practical challenges that emerge when these methods are repurposed for economic applications. Backed by comprehensive experimental analysis, the paper’s findings offer valuable insights and serve as practical guidelines for the effective design and implementation of data valuation metrics in data market contexts." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper conducts comprehensive empirical evaluations of existing data valuation metrics, identifying significant biases and instability in data-centric machine learning (ML). Key findings include: (1) common and inexpensive data pre-processing techniques can drastically change estimated data values; (2) subsampling using these metrics may exacerbate class imbalance; and (3) data valuation methods may undervalue data from underrepresented groups, raising ethical concerns. In particular, marginal contribution methods, such as Shapley-based approaches for tabular classification, demonstrate high variability due to data imputation preprocessing and may affect class balance and group fairness. To address these challenges and improve transparency, the paper introduces the novel Data Valuation Cards (DValCards)." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper raises an important issue, though its main limitation appears to be the lack of a fundamental solution. While DValCards help mitigate the issues of instability and fairness, they primarily serve as a more detailed documentation tool for data valuation methods.\n\nThe paper makes a valuable contribution by highlighting the challenges of existing data valuation approaches through extensive empirical evaluations, including issues related to instability, class imbalance, and fairness. However, some of these findings are not entirely unexpected. For instance, the instability of current metrics when different data imputations are applied is not very surprising: if the dataset changes, the data point values will change. In addition, it is not entirely clear why stability to data imputation should be considered an inherent property of a data valuation metric. Regarding fairness, it is not surprising that existing methods, which primarily aim to optimize test accuracy, might introduce bias. Nevertheless, the systematic evaluation using real-world data is valuable and provides an important, evidence-based perspective on these issues." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Can the authors provide more information on whether imputation methods actually improve performance on standard valuation tasks ? \n- It would be nice to use data valuation methods (used in certain places) instead of data valuation metrics (used more commonly in the paper), since they are generally referred to as frameworks. \n- Can we see more examples of DVal Cards in this work? For a major contribution, the main paper has only one DVal Card and it seems to be a generic setting. It would be really interesting to see multiple DVal Cards and will reinforce the utility of having such a framework. A comparison" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- The authors provide a elaborate and comprehensive analysis of the impact of preprocessing techniques and class imbalance on data valuation metrics, especially imputation methods and their effects on class balance and rank stability. 12 Open-ML datasets are considered and 4 Data Valuation frameworks are chosen for comparison. \n- The introduction of DValCards is a valuable contribution to the field, providing a standardized framework for reporting critical information about data valuation metrics.\n- The paper raises important ethical considerations and implications of using data valuation metrics in context of a case study that highlights risks to undervalued groups." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper investigates the properties of data valuation metrics, specifically their bias and instability, through case studies on real-world datasets. The authors highlight the limitations of data valuation metrics, including the impact of preprocessing techniques, minority groups, and technical and ethical side-effects. To address these limitations, they introduce DValCards, a standardized framework for reporting critical information and supporting decision-making about data valuation methods. The paper presents results on the instability of data valuation methods across different imputation techniques and highlights the implications of these inconsistencies using a case-study." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The effectiveness of imputation preprocessing methods in standard data valuation tasks (eg. weighted training, noisy label detection) is not thoroughly evaluated, and the authors could provide more evidence. Instability of values is known in Data Valuation literature, but specifics with respect to imputation methods are not widely studied. \n- Since this paper is trying to unify a setting for all Data Valuation methods, it could benefit from expanding its scope to include runtime analysis (FLOPS analysis of the method), limitations with respect to scaling and tradeoff with performance. It would be worth including the impact of validation sets [1,2] on data value. It might be worth looking into other works to unify data valuation frameworks such as [3]\n- The DVal Report in the DVal Card is reporting the data value range. 
However for a dataset, this may vary by just varying either the learning algorithm , or the performance metric or the valuation framework. Data Values (especially their min max values) can have varying values but their rank stability, performance on standard data valuation tasks (noisy label detection or weighted training for instance) can help improve this part of the report. \n\n[1] Kwon, Yongchan, and James Zou. \"Data-oob: Out-of-bag estimate as a simple and efficient data value.\" International Conference on Machine Learning. PMLR, 2023.\n\n[2] Jahagirdar, Himanshu, Jiachen T. Wang, and Ruoxi Jia. \"Data Valuation in the Absence of a Reliable Validation Set.\" Transactions on Machine Learning Research.\n\n[3] Jiang, Kevin, et al. \"Opendataval: a unified benchmark for data valuation.\" Advances in Neural Information Processing Systems 36 (2023)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Is there a reason to limit to supervised classification? Is the method limited or more widely applicable to other settings?\n\nIn Section 3.1\n`We find that varying the applied data imputation method results in appreciable variation of data val-\nues,`\n\nAnd similar mentions of the instability of data valuation methods. The question is: Is the instability arising from the definition of the data valuation? Or from the estimation of the data valuation? The former suggests a fundamental methodological flaw of data valuation while the latter is due to the lack of better and more efficient computational techniques.\n\nFollowing the previous question, if instability is a key limitation (of either data valuation, or estimation methods), specifically how does the proposed framework in Section 4 address it, by advocating for transparency?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The studied problem of data valuation is important and growing.\n- The paper is relatively well written.\n- The experimental results are with respect to real-world datasets." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper studies data valuation and specifically the transparency of it. The authors highlight some issues of existing data valuation methods, in particular the bias of data values, which can result in technical and ethical consequences. The authors provide empirical evidence for such claims. The authors propose a framework called DValCards to encourage transparence in data valuation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The claims against existing works are largely observational and empirical, and do not seem to be theoretically supported.\n- The motivation for the DValCards can be made better. 
It seems that before Section 4, the authors are describing the issues with existing data valuation methods. In Section 4, where one might expect a mitigation or solution, the framework that does not seem to address these issues is described.\n- Furthermore, the framework itself does not seem to be very extensively described or examined, in terms how it is applicable and beneficial." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We show that data valuation methods can be biased and unstable, necessitating the need for DValCards for improved transparency" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024a,\ntitle={A case for data valuation transparency via {DV}alCards},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4mFEb3JvMc},\nnote={under review}\n}" }, "abstract": { "value": "Following the rise in popularity of data-centric machine learning (ML), various data valuation methods have been proposed to quantify the contribution of each datapoint to desired ML model performance metrics (e.g., accuracy). Beyond the technical applications of data valuation methods (e.g., data cleaning, data acquisition, etc.), it has been suggested that within the context of data markets, data buyers might utilize such methods to fairly compensate data owners. Here we demonstrate that data valuation metrics are inherently biased and unstable under simple algorithmic design choices, resulting in both technical and ethical implications. By analyzing 9 tabular classification datasets and 6 data valuation methods, we illustrate how (1) common and inexpensive data pre-processing techniques can drastically alter estimated data values; (2) subsampling via data valuation metrics may increase class imbalance; and (3) data valuation metrics may undervalue underrepresented group data. Consequently, we argue in favor of increased transparency associated with data valuation in-the-wild and introduce the novel Data Valuation Cards (DValCards) framework towards this aim. The proliferation of DValCards will reduce misuse of data valuation metrics, including in data pricing, and build trust in responsible ML systems." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "data valuation", "fair compensation", "transparency", "fairness", "bias" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/a3dd44552c598d529e6aa8d1b0c461266a4c2cfb.pdf" }, "presentation": null, "primary_area": { "value": "datasets and benchmarks" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. 
To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "A case for data valuation transparency via DValCards" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
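A recurring point in the reviews of this record is that sensitivity of data values to imputation is expected, since changing the imputer changes the training set itself. The toy check below illustrates how one might measure that sensitivity, using leave-one-out values as a cheap stand-in for the Shapley-style marginal-contribution methods the paper evaluates; the synthetic dataset, the logistic-regression learner, and the mean/median imputers are illustrative assumptions, and on data this small the rank shift may be mild.

```python
import numpy as np
from scipy.stats import spearmanr
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)

# Synthetic tabular classification data with ~15% missing entries.
X = rng.normal(size=(200, 5))
y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)
X[rng.random(X.shape) < 0.15] = np.nan
X_tr, y_tr, X_va, y_va = X[:150], y[:150], X[150:], y[150:]

def loo_values(X_train, y_train, X_val, y_val):
    """Leave-one-out data values: drop in validation accuracy when point i is removed."""
    base = LogisticRegression(max_iter=500).fit(X_train, y_train).score(X_val, y_val)
    vals = np.empty(len(X_train))
    for i in range(len(X_train)):
        keep = np.arange(len(X_train)) != i
        acc = LogisticRegression(max_iter=500).fit(X_train[keep], y_train[keep]).score(X_val, y_val)
        vals[i] = base - acc
    return vals

values = {}
for strategy in ("mean", "median"):
    imp = SimpleImputer(strategy=strategy)
    values[strategy] = loo_values(imp.fit_transform(X_tr), y_tr, imp.transform(X_va), y_va)

rho, _ = spearmanr(values["mean"], values["median"])
print(f"Spearman rank correlation of LOO values across imputers: {rho:.3f}")
```

A rank correlation well below 1 would echo the paper's first finding; swapping in KNN or iterative imputation, or an actual Shapley estimator, would be the natural way to probe the effect further.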
4mni4W1ZXy
Regularity explains emergence
main
Active
large language model;emergence ability;approximation;scaling law;regularity
foundation or frontier models, including LLMs
3;3;5;5
3;3;4;4
2;2;3;3
2;2;2;2
1;2;2;2
4
3.5
2.5
2
1.75
1
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- Can the theory predict specific model sizes or conditions where emergent behavior happens? Is there a certain size, N, where the model shifts from smoothing f∗ to accurately capturing it in areas with sharp changes?\n\n- Could you design an experiment with an autoregressive transformer model that would produce results more relevant to the theory?\n\n- Can the theory’s error bounds predict error rates for specific tasks at different model sizes?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- I appreciated the theoretical framework based on Siegel & Xu (2020), which links the regularity of optimal response functions with the concept of emergence. This framework offers a fresh perspective on the phenomenon of \"emergent abilities\" in large language models (LLMs).\n\n- The main theorem effectively illustrates how model size relates to approximation quality, especially in regions where the optimal response function shows complex behavior. Although primarily qualitative, this theoretical foundation provides valuable insights into why larger models may perform better with irregular functions.\n\n- I found some of the empirical results intriguing, particularly the scaling experiments with Qwen models that revealed various trends in arithmetic calculation outcomes." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper investigates the concept of \"emergent abilities\" in large language models (LLMs) by developing a theoretical framework based on the regularity (or smoothness) of the optimal response function. The authors suggest that LLMs approximate this response function by smoothing out regions with high derivative values, leading to approximation errors that gradually decrease as the model size, N, grows. The theory proposes that as N increases, the model can capture more complex aspects of the response function without the need for smoothing, which results in sudden improvements or \"emergence\" of new abilities. The authors present a key theorem that quantifies the relationship between model size and approximation quality. They also provide experimental evidence to support the theory, including function approximation with ResNets and arithmetic tasks to demonstrate the model’s behavior in regions with high derivatives." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- Although the paper provides a unique and intuitive perspective on the mechanisms underlying emergence, it doesn’t specify a precise threshold or clear scaling rule to predict when this emergence occurs. 
I would appreciate it if the authors could better highlight exactly what constitutes the list of \"quantitative/concrete\" predictions proposed by the theory.\n\n- The toy model experiments using ResNet don’t closely match the large language models (LLMs) setup. This setup is qualitatively different from the autoregressive transformers typically used in LLMs. While the authors argue that the theory applies to any model type, this actually highlights a limitation of the theory rather than supporting the use of ResNets to examine phenomena observed in LLMs.\n\n- The choice of arithmetic tasks doesn’t clearly connect to the theory’s focus on changes in derivatives, as the observed U-shaped trend can be explained solely by the task structure. Choosing tasks more closely aligned with the theory would make the paper’s ideas clearer and more applicable.\n\n- Overall, while I find the results in some parts of the paper interesting, they often appear disconnected, lacking a clear and logical progression.\n\n- Presentation should be improved. In particular, it would greatly help if captions contained the necessary information to understand the content beyond what is provided in the existing title headers." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "How do the authors define the derivative over token valued neural networks? \n\nCan the authors systematically evaluate the derivative and the inferred smoothed input-output function on a more general class of language models? \n\nTo solidify their central claim, can the authors analyze models of increasing size, showing convergence to their central claim with model size?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "I found the central claim interesting but preliminary for several reasons. Theoretical insight into how computations in language models can achieve zero-shot behavioral changes on tasks, for example sorting a list in ascending vs. descending order based on small changes in the prompt, is interesting. The idea that behavior on such tasks is influenced by the magnitude of the local derivative of the output on the training data, leading to learning of an averaged function, is also interesting, although it is not clear how the smoothed function can then perform the required computations." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces the idea that LLMs and other machine learning models display a smoothing behavior in regions of input space where the derivative of the model output with respect to the input is large. The behavior is said to emerge in specific regions of parameter space: where the training data has a “large derivative” in a region of input space, the result is that the network learns a “smoothed version” of the input-output map there rather than the map itself. The
The \nclaim is that the averaging behavior scales with parameters number and can yield to “emergence”-- where performance of model jumps on specific tasks as a function of parameter number. The authors introduce and prove a theorem which states that when a model, neural network map, cannot meet a performance standard within epsilon, then the model will learn an averaged version of the training data. The paper then provides numerical experiments with ResNet for fitting a trigonometric function and then uses the Qwen chat model for some analysis of algebraic operations." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Technically, I find the notion of derivative in token space to be problematic. I have worked on similar problems in the case of CNNs where the notion of the derivative is well defined because inputs can be taken to be over the real numbers. \n\nThe problem with prompts is that tokenization causes the input domain for networks to be discrete valued (say integer valued), and the nature of the derivative on such input spaces is more more subtle. How is the derivative to be defined on such spaces? The problem is that the local behavior of a derivative taken on Z embedded into R is not representative of the notion that the authors seek– which is a function that measured changes on input instances. \n\nTherefore, I would like to see a much more rigorous development of the main theorem with specific definition and analysis of the derivative for token valued functions which are the main object of study for LLMs. \n\n\nSecond, the numerical experiments in the paper are very limited– the title of the paper is about language models, but the first experiment is on ResNet. \n\nThe language model experiment is limited and I do not see a global investigation of this notion of the network derivative in different regions of parameter space and the input-output function f or the “smoothed version S*f. \n\nCan the authors systematically evaluate the derivative and inferred the smoothed input-output function on a more general class of language models? \n\nTo solidify their central claim, can the authors analyze models of increasing size showing convergence to their central claim with model size?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Do you have any insights about how the relation between the number of parameters N and the optimal \\epsilon(N) is reflected in the accuracy plots for the arithmetic tasks? For instance, it seems that the threshold ‘saturates’, in the sense that for 32B-110B the accuracy is similar even for the digits where accuracy is not at 100%. As a visualization, could you show what the accuracy looks like for a fixed digit position and x-axis being model scale (from Figures 3-4)?\n2. 
You present average error results in Appendix C for the arithmetic tasks, and while general trends are the same as accuracy, it seems much noisier and the trend is not as consistent across model scale (eg. similar error between models with a difference of 2 orders of magnitude). Do you have any explanations for this, and could you also report the standard error across examples for the average error results?\n3. What was the reasoning behind choosing summation of single digit integers as opposed to performing regular addition on d digits, analogous to the multiplication setting? How would the results change for potentially ‘harder’ or ‘simpler’ subsets of examples on these arithmetic tasks (for example, addition where there’s no carry for the first digit, or multiplication where there’s no carry across the digits?)\n4. From the Big-Bench paper it was shown that the tasks exhibiting the most ‘linear’ trend in performance were perhaps more knowledge-based or required easier text manipulations, and the tasks with more ‘breakthrough’ performance trends had logical reasoning/sequential steps. How would this relate under your framework? I’m not sure these differences in the tasks are necessarily reflected in the regularity of the optimal function." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The theory is presented clearly, and the perspective of parameter size controlling the threshold on the extent to which the model predicts an irregular optimal response function is an interesting idea. The experimental setups are clear, and the synthetic setup is particularly compelling." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes an explanation for the mechanism behind emergent capabilities in large language models through the regularity of the optimal response function. The authors claim that models do not model the optimal response in regions where derivatives are large, instead opting to predict a smoother function obtained through averaging values. They justify this theoretically and have accompanying experimental results on a synthetic function and certain arithmetic tasks (multiplication, sequence of single-digit addition, and addition word problems), where some intuitions from their theory are reflected in the accuracy trends of Qwen models as the number of parameters scale." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- While the theory seems sound and the synthetic experiment is compelling, I still reserve some skepticism for the connection to the LLM experiments on arithmetic tasks. Particularly I believe the title of this paper “Regularity Explains Emergence” is very strong and has a high burden of proof especially given the numerous natural language tasks where emergence has been observed [1] and the extensive discussions around the empirical validity of emergence in the existing literature (eg. [2]). \n- To expand on this point, I currently can’t disentangle whether the theory provided by the authors truly gives an explanation for emergent capabilities in LLMs as they claim, or it provides one instance where emergence can occur and one can frame a theoretical narrative around. 
For the arithmetic tasks, while I can see that there can be conclusions drawn from the approximations of the gradient vector that are reflected in the accuracy trends across model scales and quantities like digit position and number of summands, I’m not convinced this is a result that we wouldn’t already expect intuitively and is necessarily explained from the theoretical results. The causal connection is not strong, likely due to the limitations of the theory and how it cannot explain more nuanced trends in eg. model scale (please see Questions below for expansion on this point).\n- In conclusion, I believe that the authors need to be more clear about the scope of their theory and the tasks considered in this work, or provide stronger connections between the observed emergence and the regularity of the optimal function. Are there examples of natural language tasks where the theory may predict a regular optimal response function and we do see linear improvements in the task across scale?\n\n- As a minor comment, there are areas in the paper where the writing has some typos and grammatical errors; I’ve listed several below but I’d like to ask the authors to go over their exposition and address some of the writing.\n\nLine 19: improves -> improve\n\nLine 44: \\citet instead of \\citep\n\nLine 47: task -> tasks\n\nLine 53: (Theorem 2.5 -> (Theorem 2.5)\n\nLine 56: avilable -> available\n\nLine 58: LLM model -> LLMs\n\nLine 61: method -> methods\n\nLine 282: and R-value function -> an R-value function\n\nLine 391: Figures 6-6 -> Figures 7-8\n\nLine 391: despite of -> despite\n\nLine 482-483: “On the other hand…” sentence needs rephrasing\n\n[1] Srivastava, Aarohi, et al. \"Beyond the imitation game: Quantifying and extrapolating the capabilities of language models.\" arXiv preprint arXiv:2206.04615 (2022).\n[2] Schaeffer, Rylan, Brando Miranda, and Sanmi Koyejo. \"Are emergent abilities of large language models a mirage?.\" Advances in Neural Information Processing Systems 36 (2024)." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1) I am unclear on what exactly your contribution is and what was already implicit in prior work - can you make that precise?\n2) Why is the assumption that embeddings of digits preserve the metric space structure true?\n3) Why are your datasets so small? What are the error bars? What am I supposed to see in the Figures ?\n4) What are the bars above the variables starting line 282?\n5) Sec 4.2. CoT: can you say anything more specific beyond speculation? Why are the derivatives multiplying and reducing?\n6) Why do you need a 2-component (2dim) function in your first example line 219 (why is one component not enough here?)?" 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The idea to try to relate expressiveness of the model in terms of its ability to approximate steeper functions could be interesting, \nif developed well." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies emergence of capabilities as a function of model size. It tries to argue that \"emergence\" happens in cases where the \nderivative of the ground truth is very large, and where larger models manage to approximate better. Experiments are provided 1) running a series of ResNets on a small domain sin/cos function and 2) querying Qwen models on multiplication, addition, and language-formulated multi-step addition." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "To begin with, the paper is very badly written, to the point of being unreadable. I had to apply a lot of guessing goodwill to try to understand what the main claims are. To illustrate with one example: The statement of Theorem 2.5 says \"Under assumptions 2.4\" (this is an assumption on the boundedness of the difference between loss of the minimizer and loss of the parametric minimizer with N parameters, called (5) in the paper) \"... Instead of the upper bound (5), which yields an infinite value...\". How can you assume a finite bound and then say it's infinite?\nSadly, the paper is so full of defectuous English that even with the best of interpretations it is not possible to follow beyond the vague main ideas.\n\nIt is not really clear what the contributions are on top of the work cited: Siegel and Xu,20, E et al 22, Wu 2023. Beyond a combination of results, what is new?\nSurely, it is known that large variability of the ground truth around a point gives more trouble to a model, and larger models interpolate better. \n\nAnother weakness pertains to the experiments. It is difficult to see how they illustrate the theoretical claim. First, a lot of assumptions are being made on the derivative, which is the key object of study. Like line 273: \"we will assume that [the embedding] keeps the metric space structure of the set {0, 1, · · · , 9}\" - without any justification I don't see why this is true. It is not clear to me how Figs. 1 and 2 demonstrate *any* emergence (and error bars are completely missing everywhere). \nFor the Qwen based LLM experiments, I am surprised how small the dataset is (128?). \nThere might be a potentially interesting observation in Lemma 3.2 saying that derivatives of middle digits are larger and thus harder to learn for small models, but the way this is written it is unclear whether this is true, and there might be confounding issues here (for instance, it's easy to guess whether the last digit is even or odd, given the two numbers to multiply; it could be that allowing 0 for the first digit increases the probability that guessing 0 there is correct, etc etc).\nFigs. 3 and 4 are not very conclusive without error bars, especially for such small training sets.\n\nThis paper needs to be carefully rewritten (and it wouldn't hurt to use a language model for grammar control). Apart from grammatical errors, there is general sloppiness (for instance, line 424 goes from \"Grace\" to \"Tina\", to name just one of many examples. Or the missing definition of \"i\" in the sum in line 219. 
What are d and k in line 219... Etc. )\nThe color scheme on all figures should be unified to go from lighter to darker (or something like that) for larger models - it doesn't help to have a color mix." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024regularity,\ntitle={Regularity explains emergence},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4mni4W1ZXy},\nnote={under review}\n}" }, "abstract": { "value": "We investigate the mechanisms behind emergence in large language models from the viewpoint of the regularity of the optimal response function $f^*$ on the space of prompt tokens. Based on theoretical justification, we provide an interpretation that the derivatives of $f^*$ are in general unbounded and the model gives up reasoning in regions where the derivatives are large. In such regions, instead of predicting $f^*$, the model predicts a smoothified version obtained via an averaging operator. The threshold on the norm of derivatives for regions that are given up increases together with the number of parameters $N$, causing emergence. The relation between regularity and emergence is supported by experiments on arithmetic tasks such as multiplication and summation and other tasks. Our interpretation also shed light on why fine-tuning and Chain-of-Thought can significantly improves LLM performance." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "large language model", "emergence ability", "approximation", "scaling law", "regularity" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/1d175c62d32e8b9c5a35daf795e76de298ea4a4f.pdf" }, "presentation": null, "primary_area": { "value": "foundation or frontier models, including LLMs" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": { "value": "/attachment/b8a09ca3a5631c76223ef47d0803ce26fbd54af4.zip" }, "title": { "value": "Regularity explains emergence" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4mqt6QxSUO
A Unified Riemannian-Geometric Framework for SARS-CoV-2 Detection from CT Scans
main
Active
SARS-CoV-2;Transfer learning;Medical image identification
transfer learning, meta learning, and lifelong learning
1;3;3;6
5;4;4;4
1;1;3;2
1;1;2;3
1;1;3;3
3.25
4.25
1.75
1.75
2
-0.727607
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "- Could the authors provide more empirical results comparing the proposed feature extraction with traditional methods to highlight the effectiveness of the Riemannian-geometric approach?\n\n- How does the computational complexity of the adversarial domain adaptation impact the framework's scalability for large datasets or real-time applications?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The framework creatively applies Riemannian geometry, particularly through a novel attention mechanism based on geodesic interpolation. This approach is not commonly explored in medical imaging, setting the work apart.\n\n- The proposed methods are theoretically grounded, with rigorous proofs for convergence and generalization bounds. This attention to theory enhances the credibility and robustness of the approach.\n\n- By addressing the need for reliable SARS-CoV-2 detection and domain adaptation in CT imaging, the paper is highly relevant to ongoing medical challenges. The framework’s potential applications beyond SARS-CoV-2 could drive further research in medical diagnostics and transfer learning.\n\n- Benchmark results indicate superior performance, especially in domain-shift scenarios, which highlights the model's practical effectiveness." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a novel framework for automated SARS-CoV-2 detection from pulmonary CT scans, combining advanced statistical learning theory, optimal transport, and information geometry. Key components include a submodular optimization-based image selection protocol, Riemannian geometry-inspired feature extraction via geodesic interpolation on a Fisher Information Metric-induced manifold, and a unified decision-making model with Bregman divergences. Additionally, the authors propose an adversarial domain adaptation mechanism using the Wasserstein-Fisher-Rao distance with graph-based regularization to handle domain shifts. The framework achieves state-of-the-art performance on benchmark datasets, suggesting significant contributions to both medical image analysis and theoretical machine learning." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- The reliance on advanced mathematical frameworks like Riemannian geometry and optimal transport may limit the accessibility and reproducibility of the work, as these methods require specialized knowledge.\n\n- While the framework shows strong theoretical grounding, additional experiments contrasting the proposed Riemannian-geometric feature extraction with simpler alternatives would clarify the practical benefits of the added complexity.\n\n- The paper could better address real-world deployment considerations, such as computational efficiency and robustness in clinical environments." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "Please refer to Weakness" }, "rating": { "value": 1 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The method is illustrated in details." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a framework for SARS-CoV-2 detection from CT scans, integrating advanced concepts from statistical learning theory, optimal transport, and information geometry." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Lack of clear motivation. SARS-COV-2 detection from CT scans have been widely explored in past few years. What is the innovation of such design? The authors should state and summarize existing method. What is the limitations of existing methods? What is differences between proposed method and existing detection methods? \n2. Lack of quantitative comparison experiments. Does the proposed method perform better with existing method? The paper does not adequately explain how the theoretical framework connect to experiments or analysis. \n3. The writing lacks a cohesive structure that would typically guide readers from the theoretical underpinnings to their practical application in experiments, which makes it challenging to grasp the significance of the theoretical contributions in the context of the experiments conducted." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Are there any other more common experimental contexts where this method might be applicable?\n\nPlease address the practical utility of the chosen methodology, CT slice selection, when x-rays already achieve 97% accuracy." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Theoretical analysis provides convergence guarantees, generalization bounds. Riemannian geometry-inspired attention mechanism, feature integration is formulated as geodesic interpolation. The Fisher Information Metric, Riemannian manifold on feature space F, Bregman divergence, feature attention, decision making methods average balloting method, hierarchical balloting.\n\nMathematical statements appear valid, however the overall methodology appears questionable. Results are presented on a very specific data context where accuracy is already 97% using simpler x-ray imaging." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes to integrate cutting-edge concepts from statistical learning theory, optimal transport, and information geometry in order to detect SARS-CoV-2 from pulmonary Computed Tomography (CT) scans." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper methodology seems questionable. Why begin with a focus on \"optimal image selection protocol\" which is selecting an optimal 2D slices of a 3D volume. Why not just use the entire volume? Presumably SARS-CoV-2 affects the entire volume.\n\nThe experimental motivation is hard to understand. As stated, basic CNNs (Xception) already apparently achieve 97.97% classification accuracy of the condition from chest X-ray imaging. 2D X-ray imaging is a much cheaper and more widely used modality than 3D CT imaging." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. Why is Riemannian geometry necessary for this task, and how does it concretely improve SARS-CoV-2 detection from CT scans? Could the authors clarify how these equations impact practical performance?\n\n2. Can the authors provide more details on how their theoretical advancements (e.g., geodesic interpolation, adversarial domain adaptation) translate to real-world medical diagnostic improvements? Are there simpler models that achieve similar or better results?\n\n3. The decision-making framework seems overly complex. How does the Bregman divergence-based approach perform in comparison to standard voting or confidence aggregation methods commonly used in medical image classification?\n\n4. 
How robust are the theoretical guarantees (e.g., Theorem 3.2, Theorem 3.5) in real-world applications, and what are the specific conditions under which these guarantees hold for the dataset and task described?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "From my point of view, this paper overclaims its contribution. This paper attempts to integrate advanced mathematical concepts, such as Riemannian geometry, submodular optimization, and optimal transport theory, into the field of medical image analysis. However, the experiments cannot demonstrate this contribution. The paper also introduces an adversarial domain adaptation technique, but no ablation study demonstrates its effectiveness." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper attempts to integrate advanced mathematical concepts, such as Riemannian geometry, submodular optimization, and optimal transport theory, into the field of medical image analysis." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. There are so many ''Meaningless Equations''. Several equations (such as Equation 4 involving the Fisher Information Metric and Equation 6 on geodesic interpolation) are overly complex and seem disconnected from the practical task of SARS-CoV-2 detection. Using these equations does not provide any clear advantage or insight into improving the detection process. Can the authors provide a figure that shows the connection among those equations and modules? Also, more experiments should be added in this paper to show how they improve the performance of SARS-CoV-2 detection.\n\n2. Beyond Weakness 1, this paper also suffers from overcomplication. The decision-making framework based on Bregman divergences and multiple voting schemes (Equations 10–16) adds unnecessary layers of complexity. These methods do not appear to address the practical challenges in SARS-CoV-2 detection, and their benefits are not empirically validated. Furthermore, I consider that this framework should not serve only one task; it should also work for other tasks. The experimental results are presented only on SARS-CoV-2 detection, where high accuracy has already been achieved by other methods, which weakens this paper. \n\n3. What's the motivation? The paper fails to adequately explain why the complex mathematical tools used are necessary for solving the specific problem of SARS-CoV-2 detection. The connection between the mathematical framework and the medical imaging task is tenuous at best. I am really confused about the paper's objectives. There is no figure or description that can build a strong connection between the proposed framework and SARS-CoV-2 detection. \n\n4. While the paper is mathematically dense, it lacks solid empirical results that justify the introduction of complex theoretical models. There is no clear demonstration that the advanced mathematical constructs (such as geodesic-based feature integration) outperform simpler approaches commonly used in medical image classification. More experimental results related to other datasets/tasks should be added and discussed.\n\n5. The experiments are weak. The presented experimental results do not convincingly demonstrate that the proposed methods significantly outperform existing techniques. 
The improvements shown are marginal and do not seem to justify the additional mathematical complexity introduced by the paper." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "We present a novel, theoretically grounded framework for automated SARS-CoV-2 detection from CT" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024a,\ntitle={A Unified Riemannian-Geometric Framework for {SARS}-CoV-2 Detection from {CT} Scans},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4mqt6QxSUO},\nnote={under review}\n}" }, "abstract": { "value": "We present a novel, theoretically grounded framework for automated SARS-CoV-2 detection from pulmonary Computed Tomography (CT) scans, integrating cutting-edge concepts from statistical learning theory, optimal transport, and information geometry. Our approach begins with a submodular optimization-based image selection protocol, utilizing a continuous greedy algorithm. The feature extraction process employs a Riemannian geometry-inspired attention mechanism, where feature integration is formulated as geodesic interpolation on a manifold induced by the Fisher Information Metric. We introduce a unified decision-making framework based on proper scoring rules and Bregman divergences, encompassing multiple voting schemes with proven consistency and asymptotic normality properties. To address domain shift, we develop an adversarial domain adaptation technique using the Wasserstein-Fisher-Rao distance, complemented by a graph-based regularization term derived from Gromov-Wasserstein theory. Theoretical analysis provides convergence guarantees for the adversarial training process and establishes generalization bounds in terms of optimal transport distances. Empirical evaluation demonstrates the superiority of our approach over existing methods, achieving state-of-the-art performance on benchmark datasets. This work not only advances the field of automated medical image analysis but also contributes fundamental theoretical insights to the broader domains of machine learning and optimal transport theory." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "SARS-CoV-2", "Transfer learning", "Medical image identification" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/4c82682f7ed6d22da7c1ca718ca6f1f2bcf8a53d.pdf" }, "presentation": null, "primary_area": { "value": "transfer learning, meta learning, and lifelong learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. 
To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "A Unified Riemannian-Geometric Framework for SARS-CoV-2 Detection from CT Scans" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4muXQ5r8Ol
A-Bench: Are LMMs Masters at Evaluating AI-generated Images?
main
Active
Large multi-modal models;AI-generated images;Benchmark
datasets and benchmarks
3;5;6;8
5;3;4;5
2;2;3;4
1;2;3;3
2;2;4;4
5.5
4.25
2.75
2.25
3
0.083624
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Overall, the paper adds the evaluation of LMMs' high-level semantic cognition for AIGI, in addition to previous work using LMMs to assess image generation quality. However, it does not highlight the difference between AIGI tasks and conventional cognition tasks. Could the authors elaborate on this further?\n2. Generally, the authors focus more on evaluating the perceptual capabilities of LMMs, but these perceptual capabilities are more inclined towards the low-level aspects for AIGI tasks. Could the authors further elaborate on the differences between the low-level perceptual aspects of AIGI and some earlier works?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The authors manually annotated a dataset containing 2864 image quality issues, which contributes to the development of AIGI evaluation.\n2. The authors evaluate AIGI quality from high-level semantic aspects like counting and low-level aspects like distortion, providing valuable insights for subsequent general AIGI task evaluations.\n3. The paper's A-Bench includes the evaluation performance of multiple LMMs, offering guidance for researchers who wish to use LMMs for AIGI quality assessment." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Due to the existing evaluation models' inability to effectively assess the performance of AIGI tasks, more and more researchers are turning to LMMs for evaluating the quality of generated images. The authors question this approach and design a framework consisting of seven dimensions focused on high-level semantic understanding and low-level quality evaluation to assess the quality of AIGI. By manually annotating 2864 different image quality issues, the authors compare the evaluation performance of multiple open-source and closed-source LMMs and contrast these with human evaluation results, summarizing numerous shortcomings of LMMs in the AIGI quality assessment task." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Although A-Bench includes multiple LMMs, it lacks some of the latest SOTA models. Better models such as QWEN-VL2 and MiniCPMv2.6 can be found from opencompass. The paper does not specify the versions of gpt4o used, such as gpt-4o-2024-08-06 or gpt-4o-2024-05-13, which is crucial for future researchers.\n2. The AIGI models used to generate the dataset are somewhat outdated, lacking relatively advanced image generation models such as SD3, PixArt, Flux, etc. Currently, the more outstanding AIGI models often embed large language models, which might significantly impact the evaluation conclusions.\n3. The questions are all manually generated, which is certainly good. 
However, this makes the evaluation dataset difficult to expand and might lose value as AIGI models rapidly evolve. It would be better if the questions could be designed based on the text prompts of T2I models.\n4. Compared to previous work, The paper's main contribution, i.e., high-level semantic question answering, is not strongly related to AIGI and does not seem necessary to research specifically in the AIGI context.\n5. Two-thirds of the low-level semantic question-answering data come from other datasets, reducing the paper's contribution.\n6. The paper's findings are somewhat unremarkable. It is obvious that closed-source LMMs perform better than open-source ones, and some other findings, such as the LMMs' insufficient perception of distortion, have already been mentioned in works like Q-Bench." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. It would strengthen the paper if the authors could provide scientific backing for the proposed metrics, citing sources that systematically define each measure. Specific areas where additional references might be valuable include the validity of semantic reasoning components and quality perception dimensions, to help ensure that the chosen metrics align with established frameworks in the field.\n\n2. Providing further details on the image selection process would clarify the robustness of the benchmark. Specifically, information on the criteria for image selection, the diversity of image types, and the distribution of different content categories would offer valuable context. If possible, outlining how these factors impact the benchmark's representativeness, and validity could further enhance transparency." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The benchmark is undoubtedly useful. Given the growing reliance on LLMs to evaluate various AI-generated content like images, having a comprehensive, quantitative benchmark that assesses the effectiveness of LLMs in evaluation is highly valuable.\n2. The paper tries to objectively define the underlying metrics of evaluation.\n3. The benchmark development involved a rigorous process, starting with user studies to establish a baseline, followed by testing various LLMs, which adds credibility and depth to the analysis.\n4. While the findings align with expectations, quantifying the gap between human and LLM performance is a valuable contribution. It enables the research community to approach improvements in this field with a more data-driven perspective, facilitating measured, progressive advancements." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a benchmark designed to assess the efficacy of large language models (LLMs) in evaluating AI-generated images (AIGI). 
As the field increasingly depends on LLMs for this evaluation—sidestepping the high costs and time commitments of traditional user studies—quantifying the quality and reliability of LLM-based assessments is essential. While it's generally accepted that LLM evaluations fall short of human assessments, this paper provides a systematic analysis of the performance gap across various LLMs, comparing open-source and closed-source models to human evaluations.\n\nThe benchmark defines several key metrics within two primary dimensions: Semantic Reasoning and Quality Perception. Using this framework, the study measures the performance of multiple LLMs, revealing a substantial disparity between human judgment and LLM performance in AIGI evaluation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. While the metrics cover several important facets of semantic reasoning, they lack a rigorous scientific foundation, raising questions about whether they capture the full scope of semantic understanding as implicitly perceived by humans. Specific dimensions of semantic reasoning, such as cultural nuances, or emotional depth, may be missing from the current metrics, which could impact the holistic evaluation of AI-generated images. As such, while the comparisons of different LLMs using these metrics provide intriguing insights, it remains questionable whether these metrics are robust enough to serve as a truly holistic benchmark for evaluating semantic reasoning in AI-generated images.\n\n2. The number of images used (~2,000) feels arbitrary and may be insufficient to capture the nuanced aspects of reasoning and quality perception required for a comprehensive evaluation. Expanding the dataset to around 5,000–10,000 images, with careful attention to diversity across image types and contexts, could improve the robustness of the analysis. Additionally, it would be helpful for the authors to provide a rationale for this dataset size or acknowledge any limitations they faced in scaling up." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "- I would recommend authors check out SelfEval [1] as another source of evidence that external models cannot be reliable for evaluating T2I models. Please discuss it if relevant.\n- In my experience there is a huge variance to the responses provided by LLMs/LMMs. Did the authors compute variance of the scores or perform any statistical significance studies?\n- L272 controversial -> counter factual\n- In the introduction (L112-L117), in my opinion, authors should provide some numbers to make the point that LMMs are still not masters at evaluating AIGIs. Right now authors state that \"there remains a considerable gap and significant room for improvement\". 
Instead providing some numbers can make it more straightforward.\n\n[1] Sai Saketh Rambhatla, Ishan Misra, SelfEval: Leveraging the discriminative nature of generative models for evaluation" }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 4 }, "strengths": { "value": "- Authors address a very important problem: Are current LLM/LMMs good enough to be used as judges for generative models? This line of research can provide valuable insights to train better LMMs for understanding AIGIs.\n- A-Bench along with standard LMM evaluation benchmarks provide a complete picture of an LMMs capability to understand both real and AI generated images.\n- The paper is well written and very easy to follow containing all the details necessary for reproduction.\n- The experimental section is exhaustive with comparisons provided for both proprietary and open-source LMMs." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "- Human evaluations are the gold standard for evaluating generative models especially text-to-image (T2I) models. However, they are expensive. An alternative is using automatic metrics and Large Multi-modal Models (LMMs) are a popular choice.\n- LMMs are trained on real images and AI-generated Images (AIGIs) are out of domain for LMMs questioning their reliability as evaluation models. \n- This work proposes A-Bench a diagnostic benchmark for assessing the reliability of LMMs for evaluating AIGIs. \n- A-Bench consists of two subsets 1) A-Bench P1 to evaluate the text faithfulness or prompt adherence of T2I models and 2) A-Bench P2 to evaluate the quality of the generations.\n- Authors samples 2864 AIGIs from 16 open and closed source T2I models. For each generation, they sourced human experts to annotate question-answer pairs and computed the accuracies of popular proprietary and open-source LMMs. \n- Authors report that 1) Proprietary LMMs are better than open-source counterparts for text faithfulness, 2) proprietary LMMs perform as well as humans on simple enough prompts and 3) LMMs are not good models to evaluate the generation quality of AIGIs." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- I didn't find any major weakness with this work." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "Q1) What is SRCC/PLCC in the introduction paragraph? It seems this abbreviation is never explained in the paper.\n\nQ2) In section 4.1, \"It’s worth noting that the instruction prompt might slightly differ for different LMMs according to the official setting.\" Why the instruction prompt is slight different for different LMMs? How will it impact the performance of LMMs?\n\nQ3) For the human annotators, how are they recruited? What kind of training were they given? 
How many instances are labelled by each human annotator? I am also interested in the total time required to build this benchmark." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "S1) The paper is well-written and well-organized. All the relevant details are included in the main paper and appendix. The evaluation method also seems rigorous.\n\nS2) A good pitch in studying LMMs directly through question answering instead of studying the effectiveness of certain LMM-based metrics. This helped to shed light on the true capabilities of current LMM-based image evaluation metrics." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the multimodal LLM's ability in the context of image evaluation. Instead of studying the effectiveness of certain LLM-based metrics, this work aims to identify whether multimodal LLMs (LMMs) are truly capable of evaluating AI-generated images through a question-answering benchmark. It proposes a benchmark that contains 2,864 sets of questions which can be categorized into 6 categories, involving semantic understanding and quality perception. After benchmarking a total of 18 LMMs and comprehensive analysis, the authors conclude that LMMs are still not masters at evaluating AI-generated images." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "W1) The paper would have provided more insights if the authors also studied the reasoning to verify whether the LMMs truly understand how to evaluate each category (i.e. did the reasoning fully explain the choice made by the LMM?). This might help to explain the gap between the performance of LMMs and humans. I suggest conducting a study on a small subset for each category and seeing how the reasoning aligned with the choice made.\n\nW2) It would also be desirable to see what kinds of images LMMs evaluate poorly across each category in AIGI. A more detailed diversity analysis on the AIGI dataset is required. E.g., for Basic Recognitions, what portion of the questions concern recognition of animals, humans, or artifacts? Are these LMMs doing particularly poorly on certain types of objects?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024abench,\ntitle={A-Bench: Are {LMM}s Masters at Evaluating {AI}-generated Images?},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4muXQ5r8Ol},\nnote={under review}\n}" }, "abstract": { "value": "How to accurately and efficiently assess AI-generated images (AIGIs) remains a critical challenge for generative models. Given the high costs and extensive time commitments required for user studies, many researchers have turned towards employing large multi-modal models (LMMs) as AIGI evaluators, the precision and validity of which are still questionable. Furthermore, traditional benchmarks often utilize mostly natural-captured content rather than AIGIs to test the abilities of LMMs, leading to a noticeable gap for AIGIs. Therefore, we introduce **A-Bench** in this paper, a benchmark designed to diagnose *whether LMMs are masters at evaluating AIGIs*. 
Specifically, **A-Bench** is organized under two key principles: 1) Emphasizing both high-level semantic understanding and low-level visual quality perception to address the intricate demands of AIGIs. 2) Various generative models are utilized for AIGI creation, and various LMMs are employed for evaluation, which ensures a comprehensive validation scope. Ultimately, 2,864 AIGIs from 16 text-to-image models are sampled, each paired with question-answers annotated by human experts. We hope that **A-Bench** will significantly enhance the evaluation process and promote the generation quality for AIGIs." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Large multi-modal models", "AI-generated images", "Benchmark" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/38baaaa5ac18a00901b30b23e1ebdfac21191275.pdf" }, "presentation": null, "primary_area": { "value": "datasets and benchmarks" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "A-Bench: Are LMMs Masters at Evaluating AI-generated Images?" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4nU3BLG1ni
Multi-player Multi-armed Bandits with Delayed Feedback
main
Active
multi-player multi-armed bandits;delayed feedback
learning theory
3;3;5;6;6
4;4;4;3;4
2;2;2;3;3
2;2;2;3;3
1;1;3;2;3
4.6
3.8
2.4
2.4
2
-0.516047
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "My concerns are as follows:\n1.\tThe proposed algorithm takes a leader-follower approach which makes it semi-distributed in nature as there is a necessity of communication between the leader and the followers. There are works in the literature which can work without the requirement of a leader, e.g., \nTrinh, Cindy, and Richard Combes. \"A High Performance, Low Complexity Algorithm for Multi-Player Bandits Without Collision Sensing Information.\" arXiv preprint arXiv:2102.10200 (2021).\n2.\tWhat is the rationale behind Assumption 1? What are the components of delay? For example, does it contain queueing delay? How practical is the consideration of sub-Gaussian delay?\n3.\tThe authors have considered the fixed user setting where no users are allowed to enter or leave the systems. However, in a practical cognitive radio application, users may enter or leave the system. How does the proposed algorithm behave when user entering and leaving are allowed in the system?\n4.\tIt is not clear why there is a provision of eliminating arms for which LCB is bigger than UCB. Please specify the motivation behind the virtual communication phase in details.\n5.\tPlease provide a pointer to the result where an upper bound on the feedback delay is derived. This result has been used in Lemma 1. \n6.\tCan the authors quantify the gap between the lower bound and upper bound on the regret of the proposed algorithm? It will be more justified to call the proposed algorithm near-optimal then. \n7.\tSince the paper is highly motivated by cognitive radio applications, I expected some real wireless networks simulations (such as ns-3 simulations) where delays will be real delays in a wireless network." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The considered problem is well-motivated and the analysis appears to be sound." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors have considered the delayed feedback setting in multi-player multi-armed bandit problem, motivated by cognitive radio applications. A decentralized delayed successive elimination (DDSE) algorithm which takes into account stochastic delay, is proposed in the paper, and a regret bound is established. Contrary to existing algorithms, the proposed algorithm can avoid collision by adapting to delayed feedback. A corresponding lower bound on the regret is also derived. Experiment results are presented to demonstrate the efficacy of the proposed algorithm." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The proposed algorithm takes a leader-follower approach which makes it semi-distributed in nature as there is a necessity of communication between the leader and the followers. The authors have considered the fixed user setting where no users are allowed to enter or leave the systems. The modeling of delay could have been better." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. The writing is confused, many symbols are written incorrectly, and there are symbols that are not explained. For example, \n(1) On line 157 of page 3, $r^{j}(s)$ should be written as $r^{j}_{k}(s)$; $\\mu_{k}$;\n(2) what is the difference between $\\mu_{k}$ and $\\mu_{(k)}$;\n(3) The definition of $N_{t}(k)$ on line 210 of page 4 is error; \n(4) The $\\mathcal{M}_{0}$ in Algorithm 1 should be $\\mathcal{M}^{M}_{0}$;\n(5) What is the $\\mathcal{M}_{com}$ in Algorithm 1?\n\n2. The introduction of the Algorithm 1 is very confusing. For example,\n(1) What does the line 10 line of the Algorithm 1?\n(2) In the model, author claim that $M\\leq K$, but in the Algorithm 1, $|[K]|=M$ is used as a criterion for judgment. Please explain this issue." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "In order to address the challenge of delay in cognitive radio networks, this paper proposes a novel bandit framework where multiple players engage in a multi-armed bandit and if two or more players select the same arm, none of them receive the reward. In this framework, players receive feedback after a period of stochastic delay, which complicates their ability to learn and adapt in real time, making it exceedingly difficult to avoid collisions and optimize performance. To solve this problem, this paper designs a DDSE algorithm in multi-player multi-armed bandits with stochastic delay feedback and establish a regret bound of the proposed algorithm." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Multi-player multi-armed bandits have been researched for a long time due to their application in cognitive radio networks. In this setting, multiple players select arms at each time and instantly receive the feedback. Most research on this problem focuses on the content of the immediate feedback, whether it includes both the reward and collision information or the reward alone. However, delay is common in cognitive networks when users perform spectrum sensing. This paper designs a decentralized delayed successive elimination (DDSE) algorithm in multi-player multi-armed bandits with stochastic delay feedback and establish a regret bound. This algorithm enables players to adapt to delayed feedback and avoid collision." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper provides a series of technical results, but the writing is muddled and many key symbols are not explained. It is very hard to get some intuition about the approach and its possible advantages and disadvantages. In addition, the description of the algorithm is full of confusion, with many unexplained symbols inside.\n\n1. Why set the length of each communication phase as $K+2M$? The authors should explain the reasons for the design. If the length of communication phase becomes time-varying, will the methods in this paper still apply?\n\n2. The paper provides an analysis of the lower bound for centralized algorithm in Theorem 3, but lacks an analysis of the lower bound for decentralized algorithm, which should be the main focus of the paper.\n\n3. According to Theorem 1 and Theorem 2, DDSE has better convergence performance than DDSE without delay estimation. However, in larger scenarios (Fig. 4(d)), DDSE without delay estimation performs better than DDSE. What is the significance of considering delay estimation in delay estimation algorithms in large-scale scenarios?\n\n4. This paper lacks a description of the proof process for the theorems. In addition, the result of Theorem 1 is complex and the paper lacks specific explanations for these terms." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "It is hard to understand the DDSE algorithm. \n\n1. What is the duration of exploration, communication, and exploitation?\n2. Line 190 says, \"the best empirical set arm set of player j.\" How is this set defined?\n3. Line 204: \"To avoid collision with followers and ensure sufficient exploration, the leader first sequentially hops in the set of best empirical arms with followers.\" How is it ensured that the best empirical arm of leader and follower do not overlap? How is the collision avoided?\n4. How are collisions interpreted in the communication phase? Is it binary signaling?\n\n\nIn the experiment section, why are the algorithms in any of the following papers not considered?\n1. http://proceedings.mlr.press/v83/besson18a/besson18a.pdf\n2. http://papers.neurips.cc/paper/7952-distributed-multi-player-bandits-a-game-of-thrones-approach.pdf\n3. https://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=8737653\n\nMinor issues:\n\n1. Line 209: s<T or s<t?\n2. Is there any difference between sequential hopping and round-robin?\n3. Notation say [n]={1,2,..,n}. Then why |[K]| is M, not K?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The paper provides a detailed analysis of the algorithms and establishes a low bound. However, I could not verify all the claims due to presentation issues." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper studies the multi-player, multi-armed bandit problem. The difference from the studies is that the authors allow the feedback to be received with a random delay. \nThe authors develop an algorithm named DDSE and upper bound its performance. They establish that the algorithm is near optimal by deriving a lower bound." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The authors consider the multi-player multi-armed bandit problem with a leader-follower structure. Several authors explore this problem. The new dimension of delayed feedback is a minor extension. In addition, I have concerns about the following aspects:\n\n1. The literature review is not detailed: Several papers consider multi-player bandits with a more general heterogenous reward structure, which is well suited for cognitive radio networks. \n2. The algorithm is hard to understand: (see details below)\n3. The experiments section is weak: Why only compare with SIC-MMAB and not with other algorithms like Game-of-Thrones and Explore-Signal-Exploit Repeat" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Can the authors highlight the main technical challenges? Delay in the multi-armed setting is considered, while the reviewer agrees that the collision model does complicate things, in the technical level, how the analysis will be different is not clear." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. Bounding the regret in the multi-armed multi-agent bandit setup is challenging. The paper additionally consider the delay, hence, the contribution seems to be significant.\n\n2. The paper achieves the regret bound. \n\n3. Empirical results show the efficacy of the proposed approach." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper considers a multi-arm multi-player bandit setup with delayed reward. The paper proposes novel algorithms to counter the delay in receiving the reward. The paper bounds the regret in the decentralized setting." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper considers cognitive radio setup. However, cognitive radio is hardly used in practice, it is only of academic interest. Can the paper provide any other relevant examples?\n\n2. The paper is very hard to read, hence, the contributions are obscure." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1.\tIn cognitive radio networks, players are usually dynamic. Additionally, there are some hidden nodes (players) that are unknown to each other. In this case, will the DDSE algorithm still work? \n2.\tIf a player $j$ is waiting for the feedback from arm $k$ (i.e., $t<s+d_s^j$) and another player $l$ pulls this arm $k$, will there be a collision? If a collision occurs, will player $j$ fail to obtain a reward from arm $k$ after waiting $t-s$ time slots?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1.\tThe problem of MMAB with delayed feedback is well-motivated and highly relevant to real-world applications.\n2.\tIntroducing delayed feedback significantly increases the complexity of the already challenging MAB problem. The authors effectively decompose the regret to handle this complexity and present solid theoretical results.\n3.\tThe paper is well-written and easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies multi-player multi-armed bandit (MMAB) problem with delayed feedback, motivated by the application of cognitive radio networks. Unlike previous MMAB problems that assume instantaneous feedback from arms, this work tackles the challenge posed by delayed feedback. To overcome this challenge, this work proposes a decentralized delayed successive elimination (DDSE) algorithm, which operates in three stages: exploration, communication, and exploitation. The proposed DDSE algorithm enables players to adapt to delayed feedback and avoid collision. This work theoretically analyzes the upper bound of regret for the DDSE algorithm and further compares the regret with two benchmark cases: DDSE without delay estimation and centralized lower bound. By comparison, it shows that the DDSE achieves a near-optimal regret bound." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.\tMy main concern lies on the ID allocation for the leader-follower structure in the DDSE algorithm. If the central planner can assign an ID to each player, this DDSE algorithm is no longer fully decentralized. In many cognitive radio networks, sensing nodes are dynamic, and some nodes are even hidden or unknown to the network operator.\n2.\tThe communication assumption weakens the solution in this work. \n3.\tI suggest the authors to move Subsection 5.3 ahead of Subsection 5.1 for better logic, as the centralized lower bound serve as the benchmark.\n4.\tIn the experiments, the number of players $M$ is a relatively small compared to typical application of cognitive radio networks.\n5.\tIn the experiments, the authors simply compare DDSE with two methods that do not account for delay. 
This comparison may be somewhat unfair. If there is no other available algorithm, it would be better to compare DDSE with the benchmark centralized algorithm." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024multiplayer,\ntitle={Multi-player Multi-armed Bandits with Delayed Feedback},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4nU3BLG1ni},\nnote={under review}\n}" }, "abstract": { "value": "Multi-player multi-armed bandits have been researched for a long time due to their application in cognitive radio networks. In this setting, multiple players select arms at each time and instantly receive the feedback. Most research on this problem focuses on the content of the immediate feedback, whether it includes both the reward and collision information or the reward alone. However, delay is common in cognitive networks when users perform spectrum sensing. In this paper, we design an algorithm DDSE (Decentralized Delayed Successive Elimination) in multi-player multi-armed bandits with stochastic delay feedback and establish a regret bound. Compared with existing algorithms that fail to address this problem, our algorithm enables players to adapt to delayed feedback and avoid collision. We also derive a lower bound in centralized setting to prove the algorithm achieves near-optimal. Extensive experiments validate the effectiveness of our algorithm." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "multi-player multi-armed bandits", "delayed feedback" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/896be248595da7218445427106e8efb646cc2b72.pdf" }, "presentation": null, "primary_area": { "value": "learning theory" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." 
}, "summary": null, "supplementary_material": { "value": "/attachment/4430e6eee38bc8b6b4aea675e147076fe0f2834a.zip" }, "title": { "value": "Multi-player Multi-armed Bandits with Delayed Feedback" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4ndvumlZak
Closing the Gap between Neural Networks for Approximate and Rigorous Logical Reasoning
main
Active
neural reasoning;syllogistic reasoning;Euler diagram;composition tables;rigorous reasoning
neurosymbolic & hybrid AI systems (physics-informed, logic & formal reasoning, etc.)
1;5;5;5
3;4;3;2
1;2;2;2
1;2;1;3
2;2;3;2
4
3
1.75
1.75
2.25
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "The takeaway is as follows: \n\nCurrent deep-learning systems cannot and will not reach the rigour of logical reasoning, no matter what kinds of and how much training data we use. To achieve the rigour of logical reasoning, traditional neural networks shall do qualitative extensions, namely, to promote vector embedding to non-vector embedding. \n\nThe ‘sketched proof’ does not prove transformers cannot do syllogistic reasoning. \n\nLLMs work very well, in terms of language communication, but this does not follow that they can reason well. see the reference below. \n\nEvelina Fedorenko, Steven T. Piantadosi, and Edward A. F. Gibson (2024). Language is primarily a tool for communication rather than thought. In Nature.\n\nThis paper takes syllogistic reasoning as the micro-world of rationality and shows current deep-learning systems cannot and will not reach the rigour of syllogistic reasoning. \n\nSiamese architectures are used for object recognition and for syllogistic reasoning. In both cases, they achieve excellent results. However, The phenomena described in Section 4.1 raise the problem -- These single green circles are different from the standard inputs (two circles). Surprising is that the well-trained Euler-Net may automatically complete a single green circle into standard inputs. For object recognition, this is a great capability – it can recognise objects by observing its partial image (we do not say, partial images are out-of-distribution inputs). But, for reasoning, this capability shall not be allowed, because the neural networks shall not add new premises. \n\nLine 357: new randomly generated test data have different distributions from the training data.\n\nLine 359: The motivation is to let Euler Net improve its performance by itself. It is not difficult to create an image with two circles, given two centre points and two radii. \n\nThe theorem is solidly proved using region-based spatial logic. The proof shall be independent of model architectures." }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "We are not sure whether we correctly understand your question. One-hot representation reduces the amount of training data, compared with using image representation." 
}, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "Yes. The phenomena described in Section 4.1 are entirely different from the out-of-distribution scenarios. These single green circles are different from the standard inputs (two circles); in this way, they are out-of-distribution and will perform incorrectly, as we expect. Surprising is that the well-trained Euler-Net may automatically complete a single green circle into standard inputs. For object recognition, this is a great capability – it can recognise objects by observing its partial image (we do not say, partial images are out-of-distribution inputs). But, for reasoning, this capability shall not be allowed, because the neural networks shall not add new premises. \n\nIf using (non-)vector or vector feature embeddings and the output embeddings oversmooth, then the converged output embedding must be a single vector feature embedding (a point). Or, put it this way: if feature embeddings are spheres with radii >=0, and output embeddings oversmooth, then their radii = 0. This means if we restrict radii > 0, oversmoothing will not happen. \n\nAfter researchers promote vector embeddings into spheres and introduce the method of reasoning using model construction, neural models achieve rigorous syllogistic reasoning without training data, see, Sphere Neural-Networks for Rational Reasoning https://arxiv.org/abs/2403.15297" }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "Promoting traditional vector embeddings into manifold embedding is the first step. 
The second step is to introduce the method of reasoning as model construction, see, Sphere Neural-Networks for Rational Reasoning https://arxiv.org/abs/2403.15297\n\nHere, we show the limitations of (1) the vector representation, and (2) the method of reasoning through combination tables. Both prevent neural networks from achieving rigorous reasoning, which goes beyond the statistic metrics -- more data experiments will not help. Three statements being unsatisfiable (contradictory) is a topic of possibility, not probability -- no training data for deciding unsatisfiability." }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please see above." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The paper presents an important question that the community really cares about.\n- The author shows the equivalence between syllogism reasoning and part-whole relations, and converted reasoning task into a visual prediction problem, which is interesting to me." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a task that converts syllogism into subset relations and then generates an image dataset that visualizes the subset relations and evaluates neural networks. The authors show in their experiments that although Euler Networks can learn part-whole relations between two entities, it cannot learn complex combinations of these relations, resulting in a lack of validity in the equivalent syllogism reasoning. Furthermore, the authors hypothesized that NNs should use one-hot representation to acquire the rigorous reasoning ability." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- This paper still lacks enough experiments to support the authors' claims. Why would a one-hot representation save neural nets in reasoning soundness issues?\n- The presentation of this paper could be further improved. The structure of it now looks more like a technical report. 
It lacks figures and charts to present the experimental results.\n- The discussion is high-level, while the technical details or insufficiencies of the compared methods are not discussed enough." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Are the phenomena described in Section 4.1 distinct from typical out-of-distribution scenarios?\n\n2. In Section 5 (lines 502-519), what is the relationship between using (non-)vector feature embeddings and output embeddings being points?\n\n3. Given that symbolic approaches are effective for syllogistic reasoning, why is it necessary for neural models to also support rigorous reasoning? In Section 2.1 (line 181), the authors argue that \"symbolic approaches neither explain how symbols emerge from our neural minds nor capture the ways humans reason in daily life.\" Can neural models genuinely achieve these objectives?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The authors substantiate their claims with experimental results, showcasing the shortcomings of existing models, such as the Siamese Masked Autoencoder, in achieving high accuracy in syllogistic reasoning.\n- The paper opens avenues for further exploration, encouraging researchers to develop architectures that can effectively address rigorous reasoning tasks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors highlight the limitations of neural networks, including large language models (LLMs), in achieving rigorous syllogistic reasoning, which is essential for logic and human rationality. They argue that these networks should avoid combination tables and instead use non-vector embeddings to prevent oversmoothing. The paper reviews the Siamese Masked Autoencoder and presents experiments demonstrating that models relying on combination tables cannot attain 100% accuracy in syllogistic tasks. However, using non-vector embeddings as computational building blocks can help neural networks avoid oversmoothing. This work aims to bridge the gap between neural networks for approximate and rigorous logical reasoning." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The authors claim three main contributions, and there are corresponding weaknesses for each:\n\n - **Contribution 1:** The authors conduct an experiment in Section 4. However, the experiments in Sections 4.1 and 4.2 appear to primarily test neural models' performance on out-of-distribution inputs. The poor performance of neural models on out-of-distribution inputs is already well-documented, which limits the novelty of this contribution.\n\n - **Contribution 2:** The use of combination tables is discussed in Section 4.3, but this section is confusing. 
For example, the authors state that it is not enough that the combination table only generates the conclusion \"all V are U\", since it misses the conclusion “some V are U.” However, the statement \"all V are U\" clearly describes a part-whole relationship, and \"some V are U\" can be derived from \"all V are U.\" The authors did not explain why this scenario is worse.\n \n - **Contribution 3:** The authors discuss this in Section 5 (lines 502-519), but the proof is unclear. For example, it's unclear how the two theorems prove \"using non-vector feature embedding to avoid oversmoothing\". Additionally, empirical studies to support it are lacking." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "No Ethics Concerns." }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "In fact, enhancing the inference capabilities of neural networks is a very challenging task. Will merely changing traditional vector embeddings yield significant improvements, or can it lead to substantial advancements?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This paper attempts to analyze and study the reasoning capabilities of transformers, which is of great value. Additionally, the methods proposed in this paper possess a certain degree of novelty and theoretical significance." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper discusses the \"dual-process\" theory of mind, highlighting the distinction between fast, intuitive thinking and slower, more deliberate thinking. It concludes that LLMs and Foundation Models built upon Transformers cannot reach the rigour of syllogistic reasoning. \nThe article proposes a method of transforming syllogistic relationships into \"part-whole relationships\" and suggests using non-vector embeddings instead of traditional vector embeddings to avoid the problem of \"oversmoothing.\" Oversmoothing can cause the outputs of neural networks to converge to similar embeddings, thereby affecting the accuracy of reasoning." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. This work lacks experimental validation and seems not to be fully complete.\n\n2. The article is not clearly written. The abstract and introduction are somewhat verbose, and the key innovations and objectives are not clearly defined." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- What is the motivation for specifically studying this Siamese Masked Autoencoder model? I suppose that this model does not use specific embeddings for each object (unlike models in object-centric learning, involving eg slot attention [1] or the method specific for this task as cited [2])\n- Line 357: \"We fed new randomly generated test data' How is this data different?\n- Line 359: What's the motivation for Euler Net version 2? The description of this method is extremely difficult to follow and incomplete. How does a model 'generate' input images?\n- 4.1, first paragraph. This lacks in details. Furthermore, it's well known that standard NNs are not adversarially robust. This connection is missing. \n- 4.2: I did not understand the point of this experiment. Of course a model will not be able to say anything meaningful about incorrect input data that we never defined how to respond to, especially if it's not designed for out of distribution detection. \n- Line 428: This blanket statement is highly overclaiming these results. This is about misspecification - not a lack of learning capability. \n- 4.3: It is not clear to me how these combination tables are defined from a neural network point of view. Furthermore, this result again comes from the design of the neural network. If it's allowed to output multiple answers (for instance like an LLM would be able to), it may give all syllogistic conclusions. \n- 479 \"More powerful than vanilla RNN, LSTM\": From a theoretical perspective, this is hard to claim. RNNs (with unbounded time) are Turing Complete [3]. Similar results exist for Transformers, but these require an infinite 'scratchpad / chain of thought' [4]. I suppose this 'powerful' refers to an empirical interpretation, but this should be clarified. \n- Theorem 1 is unclear and informal, and does not properly state its assumptions. What is oversmoothing? Output embeddings? \"will be points\"? Of course output embeddings are points. What are the assumptions on the model architecture? A quick look at the proof did not help me understand these questions. This certainly doesn't constitute a 'rigorous proof\" (Line 531)\n- Similarly for Theorem 2, I have no idea what \"If the output embeddings are not points\" would mean. \n\n[1] Locatello, Francesco, et al. \"Object-centric learning with slot attention.\" Advances in neural information processing systems 33 (2020): 11525-11538.\n\n[2] Wang, Duo, Mateja Jamnik, and Pietro Lio. \"Abstract diagrammatic reasoning with multiplex graph networks.\" arXiv preprint arXiv:2006.11197 (2020).\n\n[3] Nowak, Franz, et al. \"On the representational capacity of recurrent neural language models.\" arXiv preprint arXiv:2310.12942 (2023).\n\n[4] Lena Strobl, William Merrill, Gail Weiss, David Chiang, Dana Angluin; What Formal Languages Can Transformers Express? A Survey. Transactions of the Association for Computational Linguistics 2024; 12 543–561." }, "rating": { "value": 1 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The paper is fairly well written, with some clear figures." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors study whether current neural networks can perform robust syllogistic reasoning via Euler diagrams, showing that they fail in very specific aspects, and conclude with arguments stating that neural networks need to go beyond vector embeddings to solve rigorous reasoning." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I found it hard to follow what the contributions of this paper are. There are a few results that seem simple, arbitrary, poorly explained, and relevant only to a single network architecture. It is not clear to me what I should take home from these experiments. \n\nThe 'sketched proof' which is supposed to prove that transformers cannot do syllogistic reasoning also falls short: It assumes that they oversmooth, which only happens for transformers with many layers (the theoretical results are for the infinite-depth setting). If this happened consistently in practical transformer models, there is no chance LLMs could work as well as they do (as also Dovonon 2024 argues and shows, which is cited). \n\nTogether, this paper only provides meagre evidence for the infeasibility of syllogistic reasoning. Then the authors argue that different concept embeddings are needed, but do not compare (either theoretically or empirically) to the vector case, except for referring quickly to related work." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "this paper analyses features of neural networks for rigorous reasoning should and should not have." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024closing,\ntitle={Closing the Gap between Neural Networks for Approximate and Rigorous Logical Reasoning},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4ndvumlZak},\nnote={under review}\n}" }, "abstract": { "value": "Despite the historical successes of neural networks, the rigour of logical reasoning is still beyond their reach. Though this is consistent with the dual-process model of the mind, which separates the model for rigorous reasoning from that for approximate associative thinking, a systematic explanation is still missing in the literature. We review syllogistic reasoning and its irreplaceable role in logic and human rationality, show existing neural networks cannot reach the rigour of syllogistic reasoning, and propose features that neural networks for rigorous reasoning should and should not have. (1) They should not use combination tables: We reduce syllogistic relations into part-whole relations, and translate the criterion of rigorous syllogistic reasoning into a deterministic process of Euler diagram construction in vector space. Then, we survey recent neural architectures (Siamese Masked Autoencoder) for reasoning part-whole relations in object completion and degrade the task into reconstructing Euler diagrams for syllogistic reasoning. We dissect Euler Net (EN), the Siamese (Masked) Autoencoder for syllogistic reasoning, and report three experiments, showing that EN, utilising a pre-designed combination table, cannot reach 100\\% accuracy for testing data without restriction. As Transformer's Key-Query-Value structure is a combination table, we conclude that LLMs and Foundation Models built upon Transformers cannot reach the rigour of syllogistic reasoning. 
(2) They should use non-vector embedding as computational building blocks: Transformer's oversmoothing prevents any neural architecture built upon them from reaching the rigour of syllogistic reasoning. We prove, however, that in the setting of part-whole relations, if neural networks use non-vector embedding as computational building blocks, they will not oversmooth. This work suggests a new way to close the gap between neural networks for approximate and rigorous logical reasoning." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "neural reasoning", "syllogistic reasoning", "Euler diagram", "composition tables", "rigorous reasoning" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/ff25edd879f5dfb185a30e132ce7f2281bc39548.pdf" }, "presentation": null, "primary_area": { "value": "neurosymbolic & hybrid AI systems (physics-informed, logic & formal reasoning, etc.)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/5cb02d1f6a51ee1d10bfc96e6cb9bb755a6b5c80.pdf" }, "title": { "value": "Closing the Gap between Neural Networks for Approximate and Rigorous Logical Reasoning" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4nrcn0YoDG
Global Identifiability of Overcomplete Dictionary Learning via L1 and Volume Minimization
main
Active
Dictionary learning;overcomplete;sparse;identifiability
unsupervised, self-supervised, semi-supervised, and supervised representation learning
5;6;6;8
3;4;3;3
2;3;3;3
3;3;3;3
2;3;3;3
6.25
3.25
2.75
3
2.75
-0.132453
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Detailed Comments:\n\n- Maybe recall what complete and overcomplete (no orthogonality) dictionary mean\n- Formulation (2) should be better introduced. Why is \n- line 106, you say that A should be a dictionary that guarantees exact recovery of all s-sparse vectors. Do you mean that min ||x||_1 s.t y= Ax should have a unique solution for all s-sparse vectors?\n- line 107, what is the cellular hull?\n- you should clarify the notion of scattered cellular hull before introducing your results.\n- Statement of Lemma 1 is misleading. First of all, from what I understand the weights d_{*c} reach the maximum of \\sum_c d_c ||e_cS_*|| under the constraint \\|d\\|\\leq m. Secondly, if the max is attained for (3), why not just optimize the l1 norm squared?\n- Is it always possible to scale the columns of A_{\\#} and rows of S_{\\#} to satisfy (5). This is not obvious to me\n- If I understand well you want the set S to be reduced to canonical vectors p? and S could include vectors that are not in the span of Q but all vectors in span(Q) must be of the form q/||q||?\n- From your definition of B_m, the set is a subset of R^k (i.e. it is given by some linear combinations of the columns of Q). Moreover S is also a subset of R^k so how can the intersection of those subsets be a subset of R^m (i.e given by rows of Q)? Maybe you mean the columns of Q?\n- line 158-159, I would add just one sentence, to explain that for the correlation to be maximum, you need the cosine of the angle between the vectors to be maximum which implies d_c = \\alpha \\|e_c^T S_*\\| for all c\n- lines 164-165, there are alphas missing. \n- line 178 and Figure 1. If I understand well, the set B_m is an intersection of spheres of dimension m. If my understanding is correct, I think it would be worth mentioning it somewhere because it looks as if the points clouds in Fig 1 have non empty inerior (especially the 2-strongly scattered one) while my guess is there are empty. \n- lines 241-244, in your proof sketch, again if I understand well, you define your matrix Q from the left factor of the SVD of A_#. I.e. if you have A_# = U\\Sigma V^T, then you define Q as V. Then why not say it like that. I feel this is simpler and much more clear\n- On line 248, you refer to assumption 4 which does not appear anywhere (the hyperlink does not work)\n- line 251-253, shouldn’t the pseudo inverse be applied on the right of S_*, i.e. from line 252, the dimensions of W seems to be n\\times n to me. Moreover, what you need to project to have the decomposition of line 251 are the rows of S_* not the columns. \n- One lines 268-269, if I’m not wrong you mulyiply both sides by S_# and not S_*\n- On line 272, there is a transpose missing on the second A_#\n- On line 272, the last equality in Equation (8) is not completely clear to me. Isn’t ||e_c^T S_*||_1 = ||w_c^T S_#||_1 and not ||e_c^T S_#||_1 ? why is ||w_c^T S_#||_1 = ||e_c^T S_#||_1 ? 
Does the relation follow from (5) and the fact that A_# = A_*D\\Pi ? It would help to have even a short additional explanation here.\n- lines 303-305, I don’t understand the sentence. You say that the sparsity is implicitely implied in (5)? How come ?\n- lines 302 - 303 should be rephrased. I think what you mean is that “sparsity is required to have the strongly scattered condition used in the statement of Theorem 1” instead of “sparsity is implied in Assumption 1”\n- line 308 “does not necessarily mean that the sparse coefficients S_# is identifiable” —> “are identifiable” ?\n- lines 313 -320, Assumption 4 seems quite strong (or quite vague) on the dictionary. Is it easy to find such dictionaries? (I.e. you don’t provide any numerical illustration). It would be perhaps good to have a short comment such as the one at the beginning of section 2.3\n- lines 339-340, “the most crucial condition is assumption 3 that cell ..” —> “the most crucial condition is assumption 3, or the fact that cell(S_#) should be generated …”?\n- lines 341-342: “and show that when it satisfied assumption 3” —> do you mean “and show that it satisfies assumption 3”?\n- Section 2.3., lines 337-346, I don’t really understand why, if you can make it work in the sparse Gaussian model, you can’t make it work in the Bernoulli Gaussian model. If the probability in the Bernoulli distribution is set to s/n, can’t you get a result similar to what you have with sufficient probability? Even if you can’t be at least s-sparse, isn’t “at least s-sparse” with sufficient probability enough?\n- line 348 - 349 “if for every column of S” —> “if every column of S”\n- lines 362-363 : “is equal to” or “equals” but not “equals to” \n- line 380, I would remove the line “which is a good sign that the bound is tight ”\n- line 383 “even if identifiability of S_# is not required”, what do you mean “is not required”? Aren’t all your result focusing on the identifiability of S_# ? i would remove the paragraph starting from “On the other hand” because it makes everything unclear.\n- line 389 - 390, the sentence “Due to the novel formulation (2) for overcomplete …” does not make sense either. Do you mean “We will now design an algorithm for formulation (2) for which uniqueness (up to permutation and scaling) of the dictionary and sources was shown above”\n- line 427 “which is not preferable as one step of an iterative algorithm ” just remove." }, "rating": { "value": 8 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper seems mathematically sound (be careful with the dimensions, see the detailed comments below). Its positioning with respect to the existing literature should be better documented though. Two results appear as particularly related: Hu and Huang 2023 and Agarwal et al/ Rambhatla et al. It would help to have a clear discussion on the improvement of the paper compared to those results." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a new formulation for the overcomplete dictionary learning problem. The authors show global identifiability of the dictionary and sources up to permutation and scaling provided that the atoms are sufficiently sparse." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Sparse coding or sparse dictionary learning are not new" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Given that the \"sufficiently scattered\" condition has been previously introduced in NMF and topic modeling, and that similar identifiability conditions appear in [1,2], could the authors discuss the specific technical challenges posed by applying this condition in the dictionary learning (DL) setting compared to the NMF/topic modeling context? \n\n[1] Kejun Huang, Nicholas D Sidiropoulos, and Ananthram Swami. Non-negative matrix factorizationrevisited: Uniqueness and algorithm for symmetric decomposition. IEEE Transactions on Signal Processing, 62(1):211–224, 2013.\n\n[2] Kejun Huang, Xiao Fu, and Nikolaos D Sidiropoulos. Anchor-free correlated topic modeling: Identifiability and algorithm. Advances in Neural Information Processing Systems, 29, 2016.\n\n[3] P. Georgiev, F. Theis and A. Cichocki, \"Sparse component analysis and blind source separation of underdetermined mixtures,\" in IEEE Transactions on Neural Networks, vol. 16, no. 4, pp. 992-996, July 2005" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The idea is well-motivated, and the problem is relevant to the community. While previous work typically relies on column incoherence for $A$, the authors propose a novel sufficient condition of $S$ for the global identifiability of the over-complete dictionary learning problem under their formulation. This is achieved by extending the \"sufficiently scattered\" condition from non-negative matrix factorization (NMF) to the context of dictionary learning." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper addresses the identification problem in over-complete dictionary learning by introducing a new formulation. The authors primarily build on the analysis from [Huang & Hu, 2023], extending the concept of \"sufficiently scattered\" to the over-complete setting. By combining this extension with scaling and independence conditions for $A$ and $S$, the authors argue that \"sufficiently scattered\" serves as a sufficient condition for the identifiability of $A$ under the proposed formulation (2). Additionally, they provide a theoretical guarantee that this \"sufficiently scattered\" condition holds with high probability under the commonly used Bernoulli-Gaussian distribution." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1) The connection between the proposed \"sufficiently scattered\" condition and the conditions outlined in [3] remains unclear. 
Could the authors clarify this relationship?\n\n2) The paper appears to be incomplete. For instance, the figure for the experimental section is missing, and in line 187, it seems that $\\mathcal{S} \\subseteq \\mathbb{R}^k$ should be used." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Line 165: is a square power missing outermost in the second term? Why does this line imply $\\alpha = 1$?\n\nLine 171: Why is Assumption 1 reasonable? Is this equality always possible? If so, can that be shown as a lemma?\n\nLine 188: if $\\mathcal{B}_m \\subseteq \\mathcal{S}$, then isn't $\\mathcal{B}_m \\cap \\mathcal{S} = \\mathcal{B}$?\n\nLine 246: Assumption 4 has not yet been introduced - can you move the definition earlier?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The formulation appears novel and the analytical results are comprehensive.\nA sound identifiability condition is presented." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper proposes an approach for dictionary learning that uses a loss that mixes a modified, weighted version of the ell-1 norm of the mixture matrix coefficients (with different weights for different rows) with the volume of the dictionary matrix. It identifies a condition for successful identification of the mixing matrix called strong scattering. Similar to existing results, the paper analyzes the likelihood of strong scattering for random mixing coefficient matrices such as sparse Gaussian, finding that the number of vectors used in learning must scale like $\\mathcal{O}\\left(\\frac{k^2}{m} \\log \\frac{k^2}{m}\\right)$, where $k$ is the number of dictionary elements and $m$ is the data dimension. An alternating minimization algorithm for the proposed optimization is included as well." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "As with other conditions for sparse learning and recovery, it appears that the required strong scattering condition cannot be efficiently checked.\n\nIt is difficult to assess how much stronger the sufficient scattering condition is versus \"that of complete dictionary learning\".\n\nSome specific arguments are not clear (see questions).\n\nA figure in the experimental section (cf. Line 466) is missing." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. In Lemma 1, it seems that $\\Phi=I$ only when the optimal solution to equation 2 is unique. Hence, if there are multiple optimal solutions, does Lemma 1 still hold? If not, how to demonstrate that the optimal solution to equation 2 is unique? \n2. How to prove that $A$ in Assumption 4 must exist? In addition, note that $A$ needs to satisfy Assumption 1 as well.\n3. In line 363, the authors state that they aim to check whether the optimal value of equation 12 equals to 1. However, Theorem 2 only gives the probability that the maximum value is greater than 1. What's the relationship between them?\n4. Are optimization problems 14 and 2 equivalent? How to determine $\\lambda$?\n5. For the synthetic experiment, using the estimation error to evaluate the algorithm's performance is somewhat unconvincing. It is more reasonable to show that there exist a permutation matrix and a diagonal matrix that can convert the learned dictionary into the real one. In addition, multiple experiments should be conducted to record the corresponding success probability. \n6. Why didn't the authors compare the proposed algorithm with other dictionary learning algorithms in the experiment? Currently, only a simple experiment is available.\n7. Where is the Figure mentioned in line 466?\n8. Many sentences in Introduction overlap with Hu and Huang (2023a)." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "It is impressive that the proposed formulation can guarantee global identifiability over dictionary learning with an overcomplete dictionary matrix under some conditions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents a novel formulation for dictionary learning with the dictionary matrix being overcomplete. Under certain conditions, the authors demonstrate that the novel formulation guarantees global identifiability on the overcomplete dictionary. Finally, the authors design an alternating optimization algorithm to solve the proposed formulation." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. It is not easy to verify whether $A$ and $S$ satisfy the Assumptions 3-4. Hence, it is difficult to evaluate the practical applicability of the theoretical results. \n2. The paper provides only a simple simulation experiment, and the results are somewhat unconvincing.\n2. The theoretical results are related to the optimal solution to equation 2. However, the proposed optimization algorithm for solving equation 2 cannot guarantee convergence to a global optimum." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Overcomplete dictionary learning is globally identifiable via the proposed L1+Volume minimization if the sparse coefficients are strongly scattered in the hypercube." 
}, "_bibtex": { "value": "@inproceedings{\nanonymous2024global,\ntitle={Global Identifiability of Overcomplete Dictionary Learning via L1 and Volume Minimization},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4nrcn0YoDG},\nnote={under review}\n}" }, "abstract": { "value": "We propose a novel formulation for dictionary learning with an overcomplete dictionary, i.e., when the number of atoms is larger than the dimension of the dictionary. The proposed formulation consists of a weighted sum of $\\ell_1$ norms of the rows of the sparse coefficient matrix plus the log of the matrix volume of the dictionary matrix. The main contribution of this work is to show that this novel formulation guarantees global identifiability of the overcomplete dictionary, under a mild condition that the sparse coefficient matrix satisfies a strong scattering condition in the hypercube. Furthermore, if every column of the coefficient matrix is sparse and the dictionary guarantees $\\ell_1$ recovery, then the coefficient matrix is identifiable as well. This is a major breakthrough for not only dictionary learning but also general matrix factorization models as identifiability is guaranteed even when the latent dimension is higher than the ambient dimension. We also provide a probabilistic analysis and show that if the sparse coefficient matrix is generated from the widely adopted sparse-Gaussian model, then the $m\\times k$ overcomplete dictionary is globally identifiable if the sample size is bigger than a constant times $(k^2/m)\\log(k^2/m)$, where $k$ is the number of atoms in the dictionary, with overwhelming probability. Finally, we propose an algorithm based on alternating minimization to solve the new proposed formulation." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Dictionary learning", "overcomplete", "sparse", "identifiability" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/aca0e2181230f99c985ed7d00ab5c51f8c22ac6f.pdf" }, "presentation": null, "primary_area": { "value": "unsupervised, self-supervised, semi-supervised, and supervised representation learning" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." 
}, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/80337f067612be90c8b04820f3589758368b2bac.pdf" }, "title": { "value": "Global Identifiability of Overcomplete Dictionary Learning via L1 and Volume Minimization" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4o4fDJL6I7
Evaluating Ranking Loss Functions in Performance Predictor for NAS
main
Withdraw
Neural Architecture Search;Performance Predictor;Loss Function
applications to computer vision, audio, language, and other modalities
Han Ji;Yuqi Feng;Jiahao Fan;Yanan Sun
~Han_Ji2;~Yuqi_Feng1;~Jiahao_Fan4;~Yanan_Sun4
3;3;5;5
4;3;4;2
2;2;3;3
2;1;3;2
2;1;2;3
4
3.25
2.5
2
2
-0.301511
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": { "value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors." } }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. What is the scale of the N@k metric? My read of the definition is it’s the “true rank” of the architecture then it relies on the context, what are the architectures ranked together with the one on list and what is the size of the context?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. Comprehensive work with extensive experiments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper investigates the effectiveness of various ranking loss functions in performance predictors within Neural Architecture Search (NAS). In specific, this paper compares 11 ranking loss functions, including pointwise, pairwise, listwise, and weighted categories, across multiple search spaces and metrics to identify the most effective for NAS. The study finds that ranking loss choice significantly impacts predictor performance, particularly in discovering high-quality architectures. The paper finds that the top-centered metrics are better suited for NAS tasks than traditional metrics, emphasizing the predictor's ability to identify high-performing architectures. These findings help guide the selection of ranking losses, improving the efficiency and accuracy of predictor-based NAS methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Overall, I personally find it’s hard to justify the potential impact of the work. This is not the first work studying about ranking losses in efficient neural architecture search or autoML in general. 
Ranking losses haven’t been widely used in NAS or autoML, likely because there is still a lack of significant and consistent gains from ranking losses in practice.\n\nIn addition, I found the following points make the paper hard to read and understand for a general audience.\n1. Mathematical definitions of both losses and metrics are missing, even in the appendix. I had to refer to other papers. Without math definitions, details of the metrics are hard to understand. For example, N@K is lower-is-better, which is only mentioned in the caption of Figure 4 and is likely to confuse many readers at the beginning.\n2. The color coding of the results is confusing. For example, the color code in Figure 1 appears to highlight the very bad ones, like MSE on the TB101_MACRO-AUTO dataset. However, I believe what’s more relevant is the best loss on each dataset. By just scanning, it is hard to see any pattern showing ranking losses to be superior for NAS.\n3. Figure captions are not very informative. Important explanations are missing. For example, by just looking at Figures 2 and 4 and their captions, it is almost impossible to understand why colors are shown on the train-test portion grids, and thus hard to tell what these colors are trying to convey." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Suggestion: move the Loss Function in Table 4 to the second column from the left. Perhaps also move “Search Method” to the third column." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "* The comparison of multiple ranking losses seems comprehensive, covering 11 losses.\n* Some of the proposed weighted losses show promising NAS results.\n* The paper is clearly written." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "Disclaimer: I have never worked on NAS, not sure why this paper was assigned to me. Providing a high-level, low-confidence review.\n\nOverview:\nThis paper studies ranking losses for training predictors used in neural architecture search (NAS). Specifically, a search algorithm uses a predictor to evaluate candidate architectures since proper evaluation is often very expensive. Several ranking losses are compared, including pointwise, pairwise, and listwise losses. The paper argues that using weighted losses, which place more weight on top-ranking architectures, as opposed to simply ranking them overall, yields better performance than other losses.\n\nA thorough comparison of several ranking losses for NAS may be an interesting contribution, but there could be some concerns regarding novelty." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* I am not an expert on NAS but adding a weighted ranking loss to previously proposed ranking losses may be somewhat incremental (weighted vs. 
non-weighted), especially since improvement in performance compared to baselines seems rather small.\n* The results are sometimes hard to interpret. For example, looking at Figure 1, it is hard to say if there is a loss which performs well across multiple tasks. Perhaps try a bar plot or a line plot? As another example, figures 2 and 4 show the winning loss for a combination of train portion, test portion, and task, and it is hard to identify clear trends in the multitude of results." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. How can we utilize the conclusion \"ranking loss with excellent performance on top-centered rank metrics can help find architectures with high quality\" to guide the future design of NAS methods or the design of loss functions?\n2. Can you explain the obtained insights from the mathematical essence of different loss functions?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper is easy to read.\n2. The experiments are comprehensive." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper evaluates different ranking loss functions in performance predictors for Neural Architecture Search (NAS) and also draws some insights." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The paper conducts comparative experiments and analyzes existing loss functions for assessing the performance of Neural Architecture Search (NAS). However, the overall innovation and contribution of the paper are limited, with no new contributions in terms of evaluation methods, conclusions drawn, or new methods derived from the evaluation results. The two insights obtained through experiments also lack persuasiveness. The first insight, the importance of ranking loss in performance predictors, is widely recognized. It is precisely because people recognize the importance of ranking loss for NAS that there has been continuous iteration and the proposal of various ranking losses. The second insight, that ranking loss with excellent performance on top-centered rank metrics can help find high-quality architectures, is also quite straightforward. Does this insight imply that top-centered rank metrics should be used in the design of NAS methods? If the conclusion relies solely on experimental evaluation, can it stand up? 
Is there any theoretical support?\n\nI suggest that having a clear or more in-depth conclusion regarding the loss function would be more persuasive, such as what kind of model or predictor is suitable for what kind of ranking loss, or analyzing the mathematical principles of different loss functions to further propose what principles we should follow when designing ranking loss functions.\n\nOverall, I believe this paper does not make any special contributions in terms of experimental setup, conclusions drawn, and method design, and I think it does not meet the standards of ICLR." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See the \"Weaknesses\" section." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The paper systematically studies the application of ranking loss functions in NAS, which is an important and noteworthy issue.\n- The paper conducts fair performance comparisons among 11 ranking loss functions, including pointwise, pairwise, listwise, and weighted ranking loss, covering most types of ranking loss functions.\n- The paper employs various evaluation metrics, including the traditional global metric Kendall Tau, as well as ranking-based metrics like Weighted Kendall Tau, Top-$K$ metrics N@$K$, and Rel@$K$, providing a comprehensive assessment of the loss functions' performance.\n- The paper conducts extensive experiments to benchmark the effectiveness of ranking loss functions on NAS tasks, accompanied by detailed analysis.\n- The structure of the paper is clear and straightforward." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper aims to provide a comprehensive benchmark and detailed analysis of ranking loss functions for training performance predictors in neural architecture search (NAS). Specifically, the authors compare 11 ranking loss functions (including pointwise, pairwise, listwise, and weighted ranking loss) across 5 NAS search spaces and 13 corresponding NAS tasks. Notably, the authors employ various evaluation metrics, including global, rank-weighted, and Top-$K$ metrics, emphasizing the importance of using top-centered metrics for NAS tasks. Additionally, the authors evaluate the practical performance of performance predictors trained with each loss function on two NAS frameworks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "However, before this paper can be accepted, I still have the following **major concerns**:\n\n**Presentation:** The definitions of the loss functions and evaluation metrics are very vague, which is detrimental to reproducibility. 
While some intuitive explanations are provided in Section 3, the lack of formal mathematical definitions in Appendix A is quite confusing.\n\n- For example, in the definition of Weighted Approximate-Rank Pairwise (WARP) in Appendix A, the authors state, \"If the incorrect pair appears soon, we think the predictor is poor at ranking and will assign a large weight to this sample when calculating the loss\". How exactly is this weight calculated? I couldn't find the details anywhere in this paper.\n- Another example is the even more ambiguous definition of metrics in Section 3.2. For instance, I can't understand the statement in Weighted Kendall Tau about \"There is a hyperbolic drop-off in architecture importance according to the descending order of accuracy\", or \"Rel@K computes the ratio of the accuracy of architecture $A_K$ to that of the best one $A_{max}$\". The authors should not shy away from using mathematical symbols and instead replace them with confusing textual descriptions --- at the very least, precise definitions should be available in the appendix.\n\n**Experimental settings:** I still have the following concerns:\n\n- For a fair comparison, the authors use the same performance predictor setting, including the same learning rate `lr` and weight decay `wd` (Appendix B.2). However, this is inherently unfair for comparing loss functions, as different `lr` and `wd` lead to different losses. In fact, different losses are sensitive to `lr` and `wd`. For example, in information retrieval practices, pairwise loss typically requires a lower `lr` and `wd`, while listwise loss needs a higher `lr`. The authors should compare the loss functions across a wider range of hyperparameters and provide a sensitivity analysis to ensure a fair and comprehensive comparison.\n\n- The authors test only on one performance predictor composed of a four-layer GCN encoder and a three-layer MLP, which is somewhat limited. I recommend that the authors conduct experiments on more types of performance predictors to verify the consistent performance of the loss functions across different networks.\n\n**Metrics:** The authors introduce various metrics to evaluate performance, emphasizing that Top-$K$ metrics are more effective for practical NAS tasks. However, there are additional Top-$K$ ranking metrics in recommender systems which need to be considered:\n\n- NDCG and NDCG@$K$ are the most commonly used metrics in information retrieval and recommendation systems. Many ranking loss functions are designed based on them, which are fundamentally different from the accuracy-based metrics listed in the paper. In fact, with slight modifications, NDCG can be adapted for evaluation in NAS. Specifically, by sorting architecture-performance pairs $(x_i, y_i)$ according to the predicted performance $\\hat{y} _i$, DCG can be defined as $\\mathrm{DCG} = \\sum _{i = 1}^{N} (2^{y _i} - 1) / \\log _2(i + 1)$ . I suggest the authors consider more recommendation metrics to evaluate ranking loss functions.\n\n**Experiment Analysis:** The experimental analysis is generally thorough, but I have the following additional questions:\n\n- In Section 4.1.1, the authors compare the effects of different loss functions on various NAS tasks and \"observe that no ranking loss functions consistently surpass other competitors across all 13 tasks. For instance, ListNet achieves the top-1 $\\tau$ in NAS-Bench-101 while having the lowest $\\tau$ in the TransNAS-Bench101-Micro Autoencoder task\". Why does this occur? 
Is it related to the dataset or task? A more insightful discussion is preferred.\n\n- I suggest the authors summarize the criteria for choosing ranking loss functions after the experiments. Specifically, which type of loss function should be selected for a particular dataset size, NAS task, and training portion?\n\n\n\nAdditionally, I have a few **minor concerns** that do not impact the score:\n\n- All instances of @K should be written as @$K$ for consistency.\n- Figure 1 should highlight the best results, perhaps using superscripts.\n- The legend in Figure 2 obstructs the x-axis label \"Training Portion\".\n- The caption for Figure 2 uses \"under various settings\", which is confusing. It could be changed to \"under different training and test portions\"." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@misc{\nji2024evaluating,\ntitle={Evaluating Ranking Loss Functions in Performance Predictor for {NAS}},\nauthor={Han Ji and Yuqi Feng and Jiahao Fan and Yanan Sun},\nyear={2024},\nurl={https://openreview.net/forum?id=4o4fDJL6I7}\n}" }, "abstract": { "value": "Performance evaluation is a critical but compute-intensive procedure in neural architecture search (NAS). To alleviate evaluation costs, performance predictors have been widely adopted to predict architecture performance directly. Recent studies have introduced ranking loss functions into predictors to focus on the architecture rankings instead of absolute accuracy, thus enhancing the ranking ability of performance predictors. Despite the successful application of ranking loss functions, the lack of comprehensive measure metrics and different experimental configurations make a fair comparison among these loss functions a huge challenge. Additionally, some well-known ranking loss functions have not been thoroughly examined in the context of performance predictors. In this paper, we conduct the first study for 11 ranking loss functions containing the existing and the novel ones by comparing their effectiveness in performance predictors under various settings. We find that: (i) The choice of ranking loss function has a major influence on the performance of predictors; (ii) the quality of the architectures searched by the predictor-based NAS methods is closely correlated with the predictor's performance on top-centered rank metrics, rather than traditional metrics like Kendall Tau. We believe these results and insights can serve as recommendations for the optimal loss function to employ in predictors across various search spaces and experimental conditions." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": { "value": [ "~Han_Ji2", "~Yuqi_Feng1", "~Jiahao_Fan4", "~Yanan_Sun4" ] }, "authors": { "value": [ "Han Ji", "Yuqi Feng", "Jiahao Fan", "Yanan Sun" ] }, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Neural Architecture Search", "Performance Predictor", "Loss Function" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": { "value": "ji|evaluating_ranking_loss_functions_in_performance_predictor_for_nas" }, "pdf": { "value": "/pdf/7d763dc769b7837e7434f8362b29983b6ae23f9f.pdf" }, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/20e02d5e09d5922d32ecd6434033b18d469aeafe.zip" }, "title": { "value": "Evaluating Ranking Loss Functions in Performance Predictor for NAS" }, "venue": { "value": "ICLR 2025 Conference Withdrawn Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Withdrawn_Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4oQHCmnM8R
A Theory of Multi-Agent Generative Flow Networks
main
Active
Generative Model
generative models
3;3;5
5;3;5
1;2;2
2;3;2
1;1;2
3.666667
4.333333
1.666667
2.333333
1.333333
0.5
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "1.\n2. GFN in the multiagent setting may be realized easily in two contexts: \n (a) if the reward is local, then the independent agent has their own independent policy given by a GFN.\n (b) if the reward is global with small communication costs (small observation encoding) and tractable global transitions. \n Centralized algorithm is the formalization of (b) while independent is the formalizaiton of (a) in our framework. We argue in the paper that the condition for a reasonable centralized algorithm is restrictive and that, in general, the reward is global: the Starcraft 3m task is an example where each marine has its own policy, but the reward depends on the state of all three marines at the end of the sequence.\nThe goal of the JFN is to train local agents with independent GFN policies to fit a global reward.\n\n3. The key property of the JFN is the decomposition of the action flow of an abstract global GFN as a product of local action flows. Such a property does allow detailed balance or Trajectory balance objectives. DB and FM loss are very closely related and mostly differ by the implementation choice of the backward policy (FM implements the backward policy by finding parents and computing the forward edge flow for each transition to the current state while DB implements an extra model, the backward policy, either fixed or trainable). Unfortunately, Brunswic et al [1] do not provide stable TB loss suitable for the non-acylic case such as the Starcraft 3m task." }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "To begin with, we thank you for the time you took to write this detailed review. \n\n1.\n\n2. Regarding cycles, we leverage non-acyclic losses defined by Brunswic et al [1]. This prior work provides theoretical account of the acyclic limitation of GFN and how to bypass it via so-called stable losses. \n\n3.\n\n4.\n\n[1] Leo Maxime Brunswic, Yinchuan Li, Yushun Xu, Shangling Jui, Lizhuang Ma. A Theory of Non-Acyclic Generative Flow Networks. 
AAAI 2024" }, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": { "value": "To begin with, we thank you for the time you took to write this detailed review. \n\n1., 2., and 3. are actually related to the same misunderstanding. The action space is a fiber bundle over the state space; it is the space of couples (position, action). Why is that? The actions available to an agent may depend on the state it is in (say the agent is on the edge of a grid, so the move beyond the grid limit is not possible). Therefore, to each state $s$ correspond available actions $a$, and $S^{-1}(a)$ is the set of such actions. $S$ is simply the projection from $(s,a)$ to $s$.\nThe formalism introduced aims at being general, but in practice (and in the whole work) we assume that observations contain the whole information; we may thus identify $\mathcal S$ to $\prod_{i\in I} \mathcal O^{(i)}$. \nThe transition map $T$ takes an element of the action fiber bundle, i.e., a couple $(s,a)$. It thus depends on both state and action. \nFinally, the equation $\prod_{i\in I} p^{(i)} \circ S \circ \pi = Id$ means that starting from observation $(o^{(i)})_{i\in I}$ one may apply the combined policy $\pi$ to get an action (more precisely a couple state-action), then forget the action to get a state (via the state map $S$), and then recover the observations via the observation projections. This composition should yield the same observations as those we began with. Despite being obvious in practice, it is a necessary mathematical assumption. \n\n4. Indeed, our target consists in sampling states proportionally to the reward, the same way a usual GFN would and the same way the centralized MA-GFN does. \n\n5. and 6. Indeed, the local rewards are intractable; that is actually a key difficulty of localizing GFNs. They are only used abstractly and in the independent MA-GFN algorithm. And yes, even though GFNs could \"in principle\" work with a stochastic reward (say by targeting the expectation of the reward instead of the random value), and even though MSE-based FM-losses are minimized on this target, to my knowledge attempts were not successful. The point of our work is to go beyond that by training the collective of MA-GFNs on the deterministic reward by enforcing an FM-property of an abstract global GFN."
}, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "See the weakness." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "Originality: The paper is one of the first to study the extension of Gflownets to multi-agent settings. \nQuality:The paper proposes four types generative algorithms, and discuss the difference of these algorithms in terms of the training complexity and the performance. \nSignificance: Experiments validates the proposed method outperforms MAPPO, MCMC in terms of modes found and L1 error." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the theory of multi-agent generative flow networks for co-operative tasks. The paper proposes four learning patterns: the Centralized Flow Network (CFN), Independent Flow Network (IFN), Joint Flow Network (JFN), and Conditioned Joint Flow Network (CJFN) algorithms. The paper also does experiments on the toy hyper-grid environment and one StarCraft game." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1.For the clarity, I would suggest that the authors choose the original Gflownet formulations. The FM formulations in this paper and the original FM paper are quite different, which is quite hard to follow the main idea of this paper.\n2.What's the main challenge that extend the Gflownet to multi-agent settings? For now, there seems no technical difficulty for multi-agent Gflownets. \n3.The paper only studies the flow matching objective? does the proposed method applies to other Gflownet learning objectives, such as the detailed balance and the trajectory balance loss?\n4.For the experiments, which algorithm is the best? In the common sense, CFN achieves the best performance. Also, the L1 error of all algorithms are quite high, i.e., these algorithms can not sample the reward distribution in practice. Why does the paper only present the result of JFN on the StarCraft 3m map?" 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "0) See Weaknesses.\n\n1) Can you please give more detail on how the proposed framework and algorithms differ from the ones presented in [3]?\n\nReferences:\n\n[3] Shuang Luo, Yinchuan Li, Shunyu Liu, Xu Zhang, Yunfeng Shao, Chao Wu. Multi-Agent Continuous Control with Generative Flow Networks. Neural Networks, Volume 174, 2024" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "This work is one of the first to consider a novel setting of multi-agent GFlowNets, providing extensive theoretical framework and results. \n\nIt is known that RL algorithms can be applied to GFlowNet training [1], and this work is among the few [2] that explore the other direction — applying GFlowNets in RL tasks.\n\nReferences:\n\n[1] Daniil Tiapkin, Nikita Morozov, Alexey Naumov, Dmitry Vetrov. Generative Flow Networks as Entropy-Regularized RL. AISTATS 2024\n\n[2] Leo Maxime Brunswic, Yinchuan Li, Yushun Xu, Shangling Jui, Lizhuang Ma. A Theory of Non-Acyclic Generative Flow Networks. AAAI 2024" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper presents a theoretical framework focused on adapting GFlowNets to multi-agent setting, building on the previously proposed theory of non-acyclic GFlowNets. Several training algorithms are proposed that can work in centralized and decentralized settings, and experimental evaluation is provided on both synthetic and real-world tasks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I have two major concerns about this works.\n\nMy first major concern is the clarity of the text. For the most part I did not understand the methodological and theoretical results of this paper. The main thing hindering readability is a combination of very heavy mathematical notation with a lack of consistency, clarity and correct order of definitions. Here are some examples:\n\n1) I did not understand what is state map $S$ (line 80) and what is its purpose. It is introduced in Section 2 but never used in the main text of the paper.\n\n2) Why does transition kernel (line 80) only depend on the action, not on state-action pairs? In standard RL and multi-agent RL formulations it depends on both.\n\n3) Can you please explain the equation $\\prod_{i \\in I} p^{(i)} \\circ S \\circ \\pi=\\mathrm{Id}$ (line 94)?\n\n4) The task that multi-agent GFlowNets try to solve is never formally defined. 
After reading Section 2 one can guess that it is sampling global states with probabilities proportional to the global reward, but the task has to be explicitly stated nevertheless.\n\n5) Local rewards $R^{(i)}$ appear in Section 2 (line 148), but their definition and connection to global reward is given only in the next section.\n\n6) Their definition given in Section 3 is $R^{(i)}\\left(o_t^{(i)}\\right):=\\mathbb{E}\\left(R\\left(s_t\\right) \\mid o_t^{(i)}\\right)$ (line 189), and they're said to be utilized in the local training loss. From what I understand, this expectation is intractable in the general case, so I do not understand how are they defined in practice. Authors mention that it is possible to use stochastic rewards instead, but as far as I am aware, GFlowNet theory introduced in previous works, upon which this paper builds, does not support stochastic rewards.\n\n7) On line 241, authors mention: \"At this stage, the relations between the global/joint/local flow-matching constraints are unclear, and furthermore, the induced policy of the local GFlowNets still depends on the yet undefined local rewards.\" In my humble opinion, if any novel definition/theorem/algorithm depends on some object, the object has to be previously introduced and properly defined in all cases.\n\nI believe that this paper could greatly benefit from using simplified notation in the main text (while the full set of definitions can be introduced and used in appendix), as well as major revision of Sections 2 and 3 to ensure that the problem itself and all objects we work with are properly defined and explained to the reader in proper order. \n\nMy second concern is related to the presentation of experimental results. The abstract states: \"Experimental results demonstrate the superiority of the proposed framework compared to reinforcement learning and MCMC-based methods.\" Conclusion also has a similar statement (line 470). While on toy synthetic hypergrid environment the proposed methods do show significant improvement over baselines, the results on a more interesting StarCraft task do not support this claim. The proposed JFN algorithm falls behind 3 out of 4 baselines and performs similarly to the remaining one (which is Independent Q-Learning, one of the simplest existing algorithms in multi-agent RL). I understand that the main contributions of this paper are theoretical and methodological, but neverhtless I suggest correcting the statements to faithfully reflect the presented results. I also understand that such metric as win rate may not favor GFlowNets compared to RL approaches, but then I would also suggest presenting some other quantitative metric to demonstrate the utility of the proposed approach in this task, e.g. some measure of diversity." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 1 }, "primary_area": null, "questions": { "value": "1. How does the proposed approach differ from Li et al. (2023)'s work on multi-agent GFlowNets? 
Please clarify the novel contributions relative to this prior work.\n2. How does the proposed method handle cyclic state transitions in StarCraft II environments, given that GFlowNets traditionally assume acyclic state spaces?\n3. The L1 errors shown in Figure 3 are quite high. Could the authors explain why this occurs and how it affects the practical utility of the method? What specific advantages does the MA-GFN approach offer over single-agent GFN solutions for the presented Grid tasks? Could the authors provide experimental comparisons?\n4. Why is it evaluated only on the simplest 3m StarCraft II scenario? Have the authors tested their approach on more complex multi-agent scenarios?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 1 }, "strengths": { "value": "The paper is easy to understand (although some important details are missing, as discussed in the next part), and the paper studies an important problem in extending GFlowNets to handle multi-agent tasks." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a theoretical framework for multi-agent generative flow networks (MA-GFlowNets) and presents four algorithms: Centralized Flow Network (CFN), Independent Flow Network (IFN), Joint Flow Network (JFN), and Conditioned Joint Flow Network (CJFN). The authors introduce a local-global principle based on the principles in MARL that allows training individual GFNs as a unified global GFN. The authors evaluate their approach on Grid and SMAC tasks by comparing with MARL and MCMC approaches." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The paper's novelty claim is questionable. The authors state \"However, based on current theoretical results, GFlowNets cannot support multi-agent systems\" while ignoring relevant prior work, particularly Li et al. (2023), which already explored multi-agent GFlowNets.\n2. The experimental evaluation has several limitations:\n- It only uses the simplest 3m StarCraft II environment, and there is little performance improvement.\n- Results in Figure 3 show very high L1 errors, which suggests poor learning, and do not demonstrate clear advantages over single-agent GFlowNet approaches.\n- There is little performance improvement over baselines.\n3. The paper doesn't adequately address the cyclic environment problem. GFlowNets traditionally work best in acyclic environments, but the paper doesn't explain how they handle cycles in StarCraft II scenarios.\n4. The motivation for using MA-GFN in the chosen tasks is not well justified. Many of the presented problems could potentially be solved more effectively with single-agent GFlowNet approaches." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024a,\ntitle={A Theory of Multi-Agent Generative Flow Networks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4oQHCmnM8R},\nnote={under review}\n}" }, "abstract": { "value": "Generative flow networks utilize a flow-matching loss to learn a stochastic policy for generating objects from a sequence of actions, such that the probability of generating a pattern can be proportional to the corresponding given reward. 
However, a theoretical framework for multi-agent generative flow networks (MA-GFlowNets) has not yet been proposed. In this paper, we propose the theory framework of MA-GFlowNets, which can be applied to multiple agents to generate objects collaboratively through a series of joint actions. We further propose four algorithms: a centralized flow network for centralized training of MA-GFlowNets, an independent flow network for decentralized execution, a joint flow network for achieving centralized training with decentralized execution, and its updated conditional version. Joint Flow training is based on a local-global principle allowing to train a collection of (local) GFN as a unique (global) GFN. This principle provides a loss of reasonable complexity and allows to leverage usual results on GFN to provide theoretical guarantees that the independent policies generate samples with probability proportional to the reward function. Experimental results demonstrate the superiority of the proposed framework compared to reinforcement learning and MCMC-based methods." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Generative Model" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/254145a2d2f6a88590de87261565921222889d45.pdf" }, "presentation": null, "primary_area": { "value": "generative models" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/465d017b70df4da6c427f6d304ea2b105101abe3.zip" }, "title": { "value": "A Theory of Multi-Agent Generative Flow Networks" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
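A minimal sketch of a log-space flow-matching residual in which the joint flow factorizes as a product of per-agent local flows (a sum in log space), following the local-global idea described in the record above. The tensor shapes, the squared-residual form, and the terminal-reward handling are assumptions for illustration, not the authors' JFN implementation.

```python
# Illustrative only: flow-matching residual for one state with a per-agent
# product decomposition of the joint flow. Shapes and reward handling are
# assumptions.
import torch

def joint_flow_matching_residual(log_inflows, log_outflows, log_reward):
    """
    log_inflows:  [E_in,  n_agents] per-agent log flows on edges entering the state
    log_outflows: [E_out, n_agents] per-agent log flows on edges leaving the state
    log_reward:   0-dim tensor, log R(s) (use -inf for a non-terminal state)
    Enforces sum(inflows) = R(s) + sum(outflows), squared in log space.
    """
    log_in = torch.logsumexp(log_inflows.sum(dim=1), dim=0)
    log_out = torch.logsumexp(torch.cat([log_outflows.sum(dim=1), log_reward.view(1)]), dim=0)
    return (log_in - log_out) ** 2

# Toy usage: 3 incoming and 4 outgoing edges, 2 agents.
log_in = torch.randn(3, 2, requires_grad=True)
log_out = torch.randn(4, 2, requires_grad=True)
loss = joint_flow_matching_residual(log_in, log_out, torch.tensor(0.5))
loss.backward()
```

Working in log space keeps the per-agent product numerically stable; whether the residual is squared, clipped, or otherwise stabilized for the non-acyclic case is a design choice the sketch does not take from the paper.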
4oj7tYujwP
ERiC-UP$^3$ Benchmark: E-Commerce Risk Intelligence Classifier for Detecting Infringements Based on Utility Patent and Product Pairs
main
Active
Benchmark; Product-Patent Infringement Detection; Large-scale Multi-Modality Dataset; Contrastive Learning; Retrieval; Domain Gap
datasets and benchmarks
3;5;5
5;4;3
2;2;3
2;2;2
2;2;2
4.333333
4
2.333333
2
2
-0.866025
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "Please check the weaknesses section above." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. This work narrows the gap between e-commerce and machine learning research, which is a valuable try and has potential to further enlarge the impact of machine learing.\n2. In addition to the proposed benchmark dataset, it also provides detailed statistical analysis with several backbone experiments for reference.\n3. Overall, the writing is easy to follow." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a benchmark dataset for E-commerce intelligence for machine learning field. This research narrows the gap between the E-commerce area with current artificial intelligence research. In addition, this draft further provides analysis for the proposed benchmark and gives several baseline methods for reference to following research works." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. Some parts of the draft are not well-prepared, such as tab.7 and 8. The overall format needs a careful polish.\n2. Even if the proposed benchmark is for multi-modal learning, especially for vision-language interaction, I still think this topic fits better for data mining or multi-media conferences, especially considering it is a dataset-oriented paper.\n3. Captions of figures and tables are necessary to be enriched. At least, they need to indicate the conclusion of the tables and figures. Overall, they need to be more informative." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "as above" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "Marking products and infringing data is a very heavy workload, this work has done some valuable efforts to annotate the data.\n\nThe text data of the product and the patent have been rewritten, which can avoid the potential meaning difference." 
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "The article proposes a new task of detecting potential infringing patents for a given product, and introduces a large-scale MultiModal Machine Learning dataset called ERiC-UP3, aimed at promoting research in this field. The dataset contains over 13 million patent samples and 1 million product samples, providing real-world scenarios needed for deep functional understanding to promote innovative and practical solutions for intellectual property infringement detection. It also provides some evaluation baselines and testing methods. In essence, it has the following setting:\n\nSearch task set: Retrieve the patent q that product p is most likely to infringe and give the probability ranking of infringing patents in the patent list\n\nTask objective: Ensure that patents with the most similar functions and potential infringement are ranked highest in the list sorting" }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The writing is rather confused.\n\nThis article mainly discloses the patent product infringement data of MultiModal Machine Learning, but the main text mainly discusses the single modality of text, and there is little use and verification of image modality.\nFor datasets, some graphic and table information is invalid. There is a lot of redundant information in the paired graphic and text dataset. This article rarely mentions and verifies how to ensure that this data is effective for training, and rarely uses this data for experiments and verification of graphic and text information for infringement conflicts.\nThe expert evaluation mentioned in the article mainly evaluates whether there is infringement between the product and the patent, rather than evaluating the validity of the data\n\nHowever, from the perspective of CS, it is not clear whether these MultiModal Machine Learning data are effective and what the purpose of using these data is. \n\n\nI have several questions regarding this work:\n\n1. The overall framework of the paper is quite chaotic, and the research framework is not clear.\n\n2. The experiment is comprehensive, but many tables have unclear meanings and are chaotic, a bit like an experimental report\n - The Table 7 compares which method, I can't tell, and it's not specifically written in the article, just said the score is high.\n - In Table 8, Using LLM to rewrite the text data of the product and the patent can effectively avoid the difference in emphasis between the two. However, it is hasty and inaccurate to determine that 0.5b qwen is the best for only three categories. Llama3-8b also has multiple high scores. Why not consider llama3-8b?\n\n\n3. In Figure 6, the significance of calculating the recall rate of the top 500 is not great, and the average value of each CPC category is not given, and it cannot be seen that this mAR@500 is a good evaluation index, and the number of samples of each CPC classification is very different, the variance is very large, why not use top 10% or top 1% as the evaluation index, as shown in the figure below is the order of magnitude of 10 ^ 6. \n\n4. 
The experimental framework of MultiModal Machine Learning fusion retrieval is not clear\n For example, how to evaluate after image retrieval mAR@500 scores are not given\n Why is it first evaluated through text matching to the relevant patent pool, and then evaluated through image retrieval, rather than directly conducting image retrieval (missing this experiment)?\n\n This article mainly conducts experiments on text modality, with little emphasis on the role of MultiModal Machine Learning data, and does not reflect the significance of MultiModal Machine Learning data for infringement retrieval.\n\nRegarding the experiment of MultiModal Machine Learning in this article, the following questions are raised:\n\n- The MultiModal Machine Learning experiment in the main text of this article is just a simple stitching, text classification + image retrieval. Where is the specific integration of MultiModal Machine Learning reflected?\n- The experiments are all here and there, and the overall performance of MultiModal Machine Learning cannot be seen\n- Table 9 shows the image retrieval results after text classification. Which method is used for the first step of text classification? Or is it directly given classification for image retrieval to eliminate errors caused by text classification? If not, how to eliminate errors? Why not directly retrieve images? The explanation is not comprehensive enough.\n\n\n5. The experimental data is incorrect. The experimental table of MultiModal Machine Learning in the above text is different from the data given in the last supplementary material.\n - The experimental data of the plain text of table5 and table16 are inconsistent, and there is no other data of 71.43.\n \n6. Many of the experiments in the supplementary materials are not mentioned in the main text, and there is no clear definition of how the methods are done, how to conduct mixed experiments, or how to conduct mixed voting, and how to evaluate them\n - According to Table 16, the article only mentions simple concatenation, but does not explain how the following two fusion are done, and the description is quite confusing. Is the voting experiment at the end just a simple union of the results of the baselines of the original two modes? Or are there other voting operations?" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "None" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "see wearkness. I also has a question about the significant of this work: 1) Can Google Patents (https://patents.google.com/) be used for detect infringement? 2) Is Amazon conduct infringement screening before releasing the product? If they do so, i think that only very limited samples in the ERiC-UP$^3$ involves infringement, and training model with ERiC-UP$^3$ cannot significant detect real-world product." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. A new dataset for detecting infringements in the patent domain (the unique aspect lies in the annotation).\n2. A proposed pipeline to surpass existing methods.\n3. Some useful takeaways to improve detection." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents ERiC-UP$^3$, a dataset with annotations to detect infringement behaviors between a given Amazon product and existing patents. This dataset includes 1 million product samples, 13 million patent samples, and 11,000 meticulously annotated infringement pairs for training and 2,000 for testing. This work benchmarks existing baselines and proposes a two-stage pipeline for effectively conducting infringement detection. This paper also provides some best practices to improve detection." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The dataset offers some novelty but is largely a domain adaptation from existing datasets like [1] and [2]. Its main advantage lies in expert annotations on infringement cases. However, the dataset’s scale is limited, with relatively few annotations, and the patent and product samples were scraped from the internet. Additionally, the distinction between \"base\" and \"large\" versions is minimal.\n\n2. The writing lacks clarity, making it hard to grasp key points at first glance: (1) Is infringement treated as a ranking problem? (2) What constitutes the \"domain gap\"? Is it simply a stylistic shift? (3) Why were these particular classes selected?\n\n3. The technical pipeline appears ad hoc. Why use a two-stage approach instead of a streamlined, end-to-end model? Why can't existing models address this problem effectively? Why wasn’t the current infringement detection pipeline integrated into the study?\n\n4. Key baselines are missing from this study: (1) multimodal baselines, such as LLaVA, and (2) baselines from prior infringement detection research.\n\n5. Important ablations on the pipeline components are absent. For instance, how does removing expert labels affect training? What are the results if detections are run without training labels?\n\n6. The analysis part is shallow, with findings that are largely known within the field.\n\n7. The literature review lacks the recent works.\n\n8. Some obvious typos in number and upper/lower case.\n\n[1] A Dataset and Benchmark for Copyright Infringement Unlearning from Text-to-Image Diffusion Models\n[2] TMID: A Comprehensive Real-world Dataset for Trademark Infringement Detection in E-Commerce" }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024ericup,\ntitle={{ER}iC-{UP}\\${\\textasciicircum}3\\$ Benchmark: E-Commerce Risk Intelligence Classifier for Detecting Infringements Based on Utility Patent and Product Pairs},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4oj7tYujwP},\nnote={under review}\n}" }, "abstract": { "value": "Innovation is a key driver of economic and social progress, with Intellectual Property (IP) protection through patents playing a crucial role in safeguarding new creations. 
For businesses actively producing goods, detecting potential patent infringement is vital to avoid costly litigation and operational disruptions. However, the significant domain gap between products and patents—coupled with the vast scale of existing patent databases—makes infringement detection a complex and challenging task. Besides, the machine learning (ML) community has not widely addressed this problem, partly due to the lack of comprehensive datasets tailored for this task. In this paper, we firstly formulate a new task: detecting potentially infringing patents for a given product represented by multi-modal data, including images and textual descriptions. This task requires a deep understanding of both technical and legal contexts, extending beyond simple text or image matching to assess functional similarities that may not be immediately apparent. To promote research in this challenging area, we further introduce the ERiC-UP$^3$ ($\\textbf{E}$-commerce $\\textbf{R}$isk $\\textbf{i}$ntelligence $\\textbf{C}$lassifier on $\\textbf{U}$tility $\\textbf{P}$atent $\\textbf{P}$roduct $\\textbf{P}$air) benchmark, a large-scale, well-structured dataset comprising over 13-million patent samples and 1 million product samples. It includes 11,000 meticulously annotated infringement pairs for training and 2,000 for testing, all rigorously reviewed by patent experts to ensure high-quality annotations. The dataset reflects real-world scenarios with its multi-modal nature and the necessity for deep functional understanding, offering unique characteristics that set it apart from existing resources. As a case study, we provide results from a series of baseline methods and propose a simple yet effective infringement detection pipeline. We also explore additional approaches that may enhance detection performance, such as text style rewriting, cross-modal matching effectiveness, and image domain alignment. Overall, the ERiC-UP$^3$ benchmark is the first strictly annotated product-patent infringement detection dataset and stands as the largest multi-modal patent dataset, as well as one of the largest multi-modal product datasets available. We aim to advance research extending language and multi-modal models to diverse and dynamic real-world data distributions, fostering innovation and practical solutions in IP infringement detection." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Benchmark; Product-Patent Infringement Detection; Large-scale Multi-Modality Dataset; Contrastive Learning; Retrieval; Domain Gap" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/fc55ebf0f82b1bb8c79e03a1a5c6d6777ef02c2c.pdf" }, "presentation": null, "primary_area": { "value": "datasets and benchmarks" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "ERiC-UP$^3$ Benchmark: E-Commerce Risk Intelligence Classifier for Detecting Infringements Based on Utility Patent and Product Pairs" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4pRwkYpa2u
Rethinking Light Decoder-based Solvers for Vehicle Routing Problems
main
Active
Combinatorial Optimization;Vehicle Routing Problem;Generalization
optimization
3;5;5;6;6
4;4;4;3;4
3;3;3;3;2
2;3;2;3;3
2;2;2;3;3
5
3.8
2.8
2.6
2.4
-0.456435
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "How does ReLD manage complex or dynamic VRP constraints, such as real-time updates or varying demands?\nHow could larger decoder modifications enhance OOD performance?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The paper provides a detailed breakdown of the light decoder’s limitations in VRP, particularly the static embeddings’ burden on the encoder.\n\nReLD addresses an important need in VRP research generalization across problem scales. \n\nThe modifications retain the light decoder’s computational efficiency, which could be advantageous for applications needing faster routing solutions without the computational load of heavy decoders." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents an analysis of light decoder-based solvers for VRP, specifically addressing the challenges of generalization to out-of-distribution (OOD) problem instances. By identifying limitations due to the reliance on static embeddings, the authors propose a modified approach, ReLD (Rethinking Light Decoder), which incorporates identity mapping and a feed-forward layer to enhance the decoder’s capacity. The proposed model demonstrates improved OOD performance across a variety of VRP instances, narrowing the performance gap between light and heavy decoder approaches." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Limited Scalability for Large Instances: ReLD struggles to compete with heavy decoder architectures on very large instances, such as CVRP1000. This limitation suggests that ReLD’s current modifications might not be sufficient for all scales of VRP.\n\nThe proposed modifications are relatively minor adjustments. While effective, they lack substantial novelty within the machine learning field.\n\nWhile this paper makes a valuable contribution by revisiting the light decoder paradigm and identifying limitations in current architectures, its primary innovations are modest. The architectural modifications, though effective, are straightforward and may not sufficiently address scalability issues, particularly in very large instances or complex real-world VRP variants. Moreover, the method is not showing SOTA results on the biggest problem instances, which is assumed to be the main advantage of the method." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Besides the concerns raised in the weakness part, I have the following additional questions:\n\n1. In Table 5, please check whether all the numbers you are reporting are correctly documented. The bolded number in OVRPLTW and the bolded number in OVRPBLTW are either too big or too small and do not correspond to the reported gaps.\n2. the figure 1 could be further improved. E.g. the font size and the caption." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The paper is well-written and easy to follow\n2. A thorough empirical analysis is conducted to validate the potential limitation of current light decoder-based solvers\n3. A simple but effective modification is performed to improve the decoder part of the current decoder-based solvers\n4. Experiments are conducted on many datasets covering different distributions, problem sizes, and problem classes." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper revisits light decoder-based solvers for VRPs, recognized for their efficiency but limited in generalization to larger or varied problem instances. The authors attribute this limitation to the handling of static embeddings, which creates high information density in the encoder, overwhelming the simplistic decoder. To overcome these challenges, they propose an enhanced decoder structure, incorporating identity mapping and feed-forward layers to effectively boost the decoder’s capacity and improve generalization performance. The authors perform experiments to demonstrate that ReLD achieves better generalization performance on both in-distribution and out-of-distribution tasks, closing the performance gap with heavier decoders while maintaining computational efficiency." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The analysis in this paper is largely 'end-to-end,' with a strong reliance on empirical results presented towards the conclusion. Several concerns regarding the encoder and decoder architectures are raised, and it may be beneficial to adopt a more direct investigative approach into these components.\n\nAdditionally, the limitations of the current light decoder-based model are inferred from empirical experiments conducted solely on CVRP problems, focusing on LEHD and POMO models. A broader analysis including more models with both light and heavy decoder architectures would provide a more comprehensive foundation for the conclusions drawn.\n\nThe insights presented offer valuable guidance on improving the decoder architecture to address limitations associated with overly simplified decoders. This work implements a minor modification in this direction; however, the extent to which further increases in model complexity would yield additional performance gains remains uncertain. This issue points to a trade-off between model performance and efficiency, though an optimal balance between the two has yet to be determined.\n\nOverall, I appreciate the discussions and architectural considerations raised by this paper concerning VRP model design. Currently, I lean toward borderline acceptance." 
}, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. Do you have any results on the large-scale instances, e.g., CVRP-10000?\n2. Can this method be transferred to TSP or other CO problems? Do you have any preliminary results?" }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1. This paper approaches the deep learning for VRP problem from a new perspective: make the decoder contain richer information.\n2. The modification is not complicated and the results looks good." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper studies the limitations of light decoder-based solvers for Vehicle Routing Problems (VRP). Modern RL methods for VRP typically employ light decoders for solution generation due to their efficiency. However, the authors think the light decoders may not capture the problem structure well. So they proposed Revised Light Decoder (ReLD) which modified the original light decoder and make it contain richer information.\n\nThe experiment results show that their framework can improve the current state-of-the-art methods." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The scalability issue still exists. There are no large-scale experiments conducted." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "These are not questions, but rather minor shortcomings related to the paper’s writing.\n\n(1) There are too many versions of ReLD introduced in the paper, and it is difficult to follow what each version represents.\n\n(2) Shouldn't Figure 1 also reflect the changes described in Section 3.2, Powerful Query? Currently, it seems to only illustrate the changes discussed in Section 3.1, Direct Influence of Context.\n\n(3) Table 5 contains several mis-copied numbers and incorrect placement of bold formatting." 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "The authors effectively highlight a key issue with static embeddings: the gap between the information needed for optimal decoder performance and the information stored in the context vector. This insight is both logical and significant, emphasizing a critical area for future improvements in this field. The research in this direction holds promising implications for advancing VRP solutions." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The authors first address the challenges of light decoder-based approaches for vehicle routing problems (VRPs). Because the traditional approach relies on static embeddings that must capture complex information within a single set of representations, it is difficult for the simplistic decoder to fully leverage this information, particularly in out-of-distribution (OOD) scenarios (such as generalizing to larger instances or different VRP variants). Enhancements to the decoder is thus introduced, such as adding an identity mapping and a feed-forward layer, to mitigate this issue. Experimental results demonstrate that this adjustment improves both in-distribution and OOD generalization performance, narrowing the gap between light and heavy decoder paradigms." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "(1) The issue of the overly complex context vector is not directly addressed. The authors’ solution is somewhat simplistic, adding only extra structures to the decoder without any modification to the encoder or main inference process. This solution still has the same 'complex context vector' problem as described in \"Gap between Policy Formulations\" subsection (LINE 195). Given this limited change, the improvement in performance, such as in CVRP100, is also marginal.\n\n(2) One of the paper's main claims is that their method improves out-of-distribution (OOD) performance. However, the practicality of the approach based on zero-shot generalization is unclear. Why is it necessary for a model trained on N=100 cases to perform well on N=1000 cases? There are many established methods to address OOD problems, such as fine-tuning before inference, tree search or active learning during inference. The proposed modifications should ideally be evaluated in these more practical and realistic settings rather than in zero-shot scenarios.\n\n(3) With additional parameters in the decoder, the training burden likely increases. The training details are largely missing, particularly regarding how the proposed modifications affect training time and resources. The authors should also compare their approach to other potential modifications that increase parameters in the decoder, such as adding an additional decoding layer, in terms of training efficiency and resource requirements, as well as the solver performance." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- How $q_c$ in Equation (11) is used?\n\n- What are $W_S^OW_S^V$ in Equation (8)? Does this imply performing value projection followed directly by output projection? As far as I know, such an operation does not exist in the original transformer. Could you provide a more detailed explanation regarding this part?\n\n- The modification presented in this paper adds an FF layer to the POMO decoder, and the decoders in models like POMO operate almost as many times as the number of nodes in an auto-regressive manner. Therefore, it is expected that the model modification proposed in this paper will increase both training and inference (optimization) times. However, in line 361 of the paper, it states that the additional step-wise running time is independent of the number of nodes and is computationally efficient. Could you please provide a more detailed explanation on this? If possible, it would be helpful to provide numbers on the changes in training and inference times due to the model modification.\n\n- How effective is the Distance Heuristic($-log(dist_i)$) in Equation 12? Is there any information on how the model performs if this heuristic is removed?\n\n- In Table 3, the CVRP100 gap for POMO augx8 is 1.004%, which shows a significant difference from the 0.32% gap for CVRP100 presented in the original POMO paper. Could you explain the reason for this discrepancy?\n\n- In Table 3, the addition of the ff layer appears to provide little benefit for CVRP100. Why does adding the ff layer to the decoder not contribute to performance improvement for a problem size 100?\n\n- As a minor comment, the font size in Tables 3 and 4 is too small." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- This paper analyzes the limitations of the Light Decoder-based model and demonstrates the complexity of the encoded information and lack of generalization performance through experiments.\n\n- This paper proposes a novel approach by modifying the decoder to overcome the issues inherent in Light Decoder-based models, and demonstrates the superiority of the proposed method through experiments. \n\n- In the CVRP generalization experiments, it showed promising results than existing Light Decoder-based models and performed well across various VRPs." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper discusses the limitations of the Heavy Encoder-Light Decoder based model, which is commonly used in combinatorial optimization. Specifically, it addresses the problem where the encoder must embed all possible contextual information required during the decoding process into a single embedding, resulting in excessively high information density. It also highlights how the decoder fails to utilize this information. As a solution, the authors propose ReLD. The proposed method is validated through experiments on CVRP and VRP variants, demonstrating its effectiveness. In particular, the method showed superior performance compared to existing Light Decoder-based solvers when faced with out-of-distribution (OOD) problems." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- There are unclear parts in the equations and content of Section 3 Methodology. It also does not match well with Figure 1(e). It is unclear whether $Q$ in Figure 1 (e) is the same as $q_c$ in Equation (11) or not. And It is difficult to understand the structure of the proposed neural network in Equation (7) to (11). For example, it is not clear how $q_c$ in Equation (11) is used. Relevant questions can be found in the Question section. \n\n- In the experiments, it is unclear whether the improvement in VRPs solution accuracy is due to the addition of the feedforward network itself or simply the increase in decoder parameters caused by adding FF. It seems necessary to conduct a comparative evaluation with a decoder where the parameters are increased equivalently without FF. Furthermore, a more solid logical explanation is needed on how the FF helps overcome the limitations of the light decoder." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024rethinking,\ntitle={Rethinking Light Decoder-based Solvers for Vehicle Routing Problems},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4pRwkYpa2u},\nnote={under review}\n}" }, "abstract": { "value": "Light decoder-based solvers have gained popularity for solving vehicle routing problems (VRPs) due to their efficiency and ease of integration with reinforcement learning algorithms. However, they often struggle with generalization to larger problem instances or different VRP variants. This paper revisits light decoder-based approaches, analyzing the implications of their reliance on static embeddings and the inherent challenges that arise. Specifically, we demonstrate that in the light decoder paradigm, the encoder is implicitly tasked with capturing information for all potential decision scenarios during solution construction within a single set of embeddings, resulting in high information density. Furthermore, our empirical analysis reveals that the overly simplistic decoder struggles to effectively utilize this dense information, particularly as task complexity increases, which limits generalization to out-of-distribution (OOD) settings. Building on these insights, we show that enhancing the decoder capacity, with a simple addition of identity mapping and a feed-forward layer, can considerably alleviate the generalization issue. Experimentally, our method significantly enhances the OOD generalization of light decoder-based approaches on large-scale instances and complex VRP variants, narrowing the gap with the heavy decoder paradigm." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Combinatorial Optimization", "Vehicle Routing Problem", "Generalization" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/61dfb12f9d4f4be2feff2e181da7247c9565dd0d.pdf" }, "presentation": null, "primary_area": { "value": "optimization" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Rethinking Light Decoder-based Solvers for Vehicle Routing Problems" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4qRCiEZGKd
Neural Description Logic Reasoning over Incomplete Knowledge Bases
main
Active
concept learning;description logic;knowledge bases;neural reasoner;embeddings;SROIQ;atomic concepts
other topics in machine learning (i.e., none of the above)
3;3;3;3;5
2;4;4;5;2
3;3;2;2;2
2;1;1;1;2
3;2;2;4;2
3.4
3.4
2.4
1.4
2.6
-0.583333
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 5 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": { "value": "None" }, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "Please clarify issues I mentioned under weaknesses, esp. ones related to *4 Description of the experimental setup is weak*" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1 The targeted problem is interesting\n2 The proposed approach can be easily reproduced" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This is a very well-written paper on an interesting problem. The paper is mostly sound. But it downplays the capabilities of related work and overpromises on what it accomplishes. Furthermore, the originality of the proposed approach is minimal - which might perhaps be ok if there were a comprehensive evaluation, which, however, is not part of the paper." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1 The paper overpromises\n2 Suggested method is a trivial extension of existing methods\n3 Experimental comparison with related work is weak\n4 Description of the experimental setup is weak\n\n\n*1 The paper overpromises*\nIt says \"We propose neural semantics to tackle the instance retrieval problem on incomplete or inconsistent SROIQ KBs\".\nWhat the paper actually does is that it (step A) computes a backbone based on a very limited set of axioms only containing instance assertions, role assertions, and subsumption axioms of the explicit form (C rdfs:subclassOf D). This is a tiny subset of SROIQ axioms.\nBased on this subset, it allows for (step B) the querying of SROIQ concepts from a very limited set of queries, i.e., the ones listed in Table 3, but no recursive definition of concept expressions was applied, again underutilizing the capabilities of SROIQ (at least I could not read this from the paper).\n\n*2 Suggested method is a trivial extension of existing methods*\nThe two steps (step A) and (step B) could have been trivially done by a range of Complex Query Answering methods. \n\n*3 Experimental comparison with related work is weak*\nThe proposed approach is an approximation. The only comparisons are made against sound (and complete) semantic reasoners. Other trivially available approximations are not considered. As mentioned above, complex query-answering methods will be available. Even if few constructs would not be available in a particular answering method, others would be and could be compared.\nSimilarly, maybe worse, the statement that description logic embeddings do not support instance retrieval is wrong. Already, the computation of the backbone in these methods could have been more powerful than (step A) suggested here, and a comparison to their approximation would be easily possible. 
Note, for example, that a union query would be trivially available for box or ball embeddings by exactly returning the disjunction of the two elements. Note that this is a *trivial* modification and does not change these suggested methods, since no complex composition of concept expressions is required.\n\n*4 Description of the experimental setup is weak*\nThe procedure for constructing wrong axioms is unspecified.\nThe procedure for having queries remains vague (are retrievals of composed concept expressions part of the queries?)\nIt is unclear to what extent the benchmark datasets do or do not exploit the expressiveness of SROIQ." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "(1) Section 3.2 introduces the link prediction task, which is a standard task over graph data, but how does it relate to instance retrieval that is conducted in the experiments? Also, it seems no (standard) link prediction experiment was conducted. If so, what is the purpose to mention link prediction in Section 3.2?\n\n(2) How were the KB embeddings trained? Does the KB embedding training process form a part of EBR? These details are essential for readers to understand the working process and utility of EBR. \n\n(3) The authors claim that EBR could scale to large datasets. But according to the dataset statistics in the appendix, the largest dataset Vicodi only has 33K instances and 116K assertions. On the other hand, real-world KBs such as Freebase, DBpedia, are typically in million or even billion scale. I wonder if there are any standard convention to conceptualize “large-scale KBs\"? I am also curious about whether the proposed EBR can handle KBs at the scale such as Freebase?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "(1) This work explores the important field of neuro-symbolic reasoning, which is crucial for advancing knowledge representation and reasoning, especially for real-world applications where incomplete or noisy data is unavoidable.\n\n(2) The time efficiency for performing reasoning on large, noisy KBs is also important in practice. \n\n(3) The paper provides detailed background of description logic and SHOIQ syntax, offering clear formulations that help readers understand the context of the task and the proposed approach." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper presents Embedding-Based Reasoner (EBR), a neural description logic reasoner designed to handle large-scale, incomplete, and inconsistent knowledge bases. EBR approximates logical reasoning under the $\\mathcal{SHOIQ}$ syntax by using existing neural embeddings for the KB, aiming to provide a scalable and robust solution for handling noisy data. 
The experimental results demonstrate superior instance retrieval performance of EBR over conventional symbolic reasoners, including HermiT, Pellet, JFact, and Openllet, across several benchmark datasets." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "(1) First, the technical contribution is unclear. Although the paper introduces EBR as a novel neural reasoner for incomplete/inconsistent KBs, it heavily relies on existing neural embedding techniques, with limited originality beyond adopting the embeddings for DL-based reasoning. This makes it unclear what aspects of EBR are technically new. \n\nI understand the theoretical contribution to be the introduction of a mapping between DL syntax and neural semantics. However, as EBR is only applied to the task of instance retrieval, it remains unclear whether, and to what extent, the mappings help to improve the performance of the reasoner. Besides, no evidence (e.g., a provable guarantee) was given to justify the correctness on the theoretical side, which further limits the significance of the work.\n\nSeveral improvements could be made, including (i) presenting an explicit comparison of how EBR fundamentally differs from prior approaches; and (ii) providing evidence (if any), such as a theoretical guarantee, to validate the contribution. \n\n(2) The evaluation process is unclear. Section 4 and Section 5 do not explain in detail how to conduct “instance retrieval” in a given KB. How does the EBR reasoner work in the experiments? For example, does this process involve reasoning over the graph structure? How is the score for each entity computed for each concept? \n\nSection 3.2 introduces the link prediction task, which is a standard task over graph data. However, the experiments only conduct instance retrieval, not link prediction. Does instance retrieval relate to link prediction? If so, this should be clarified to avoid confusion. If not, then what is the purpose of mentioning link prediction in Section 3.2?\n\nHow were the KB embeddings trained? Does the KB embedding training process form a part of EBR? All the details, including the input/output scheme, encoding/decoding process, message passing mechanism, loss function, etc., are essential for readers to understand the working process and the utility of EBR. (The detailed settings might be given in the appendices, but the current version does not contain them.)\n\n(3) Lack of Comparisons with Neural Embedding-based Models. In the paper, EBR is only compared with traditional symbolic reasoners, while there is no comparison with recent neural-based or hybrid models that can also handle incomplete data (e.g., rule learning models including Neural-LP [1], DRUM [2], or ontology-aware neural models). \n\n[1] Fan Yang, Zhilin Yang, William W. Cohen. Differentiable Learning of Logical Rules for Knowledge Base Reasoning. NeurIPS 2017\n\n[2] Ali Sadeghian, Mohammadreza Armandpour, Patrick Ding, Daisy Zhe Wang. DRUM: End-To-End Differentiable Rule Mining On Knowledge Graphs. NeurIPS 2019\n\n(4) A case study and detailed analysis should be presented. The current evaluation only reports the Jaccard similarity, F1 scores and running time for instance retrieval, which are all high-level statistics and provide little insight into the underlying working process and benefits of EBR. \n\nTo improve this, instead of simply reporting the metric scores on every dataset, I suggest the authors include an analysis of some cases extracted from any of the datasets.
For example, by comparing the different performance of EBR and the baselines, readers could gain more insights about why EBR or any baseline makes it correct/incorrect.\n\n**Minor issues**\n\n(1) Line 101, “…iff $C^\\mathcal{I} \\sqsubseteq D^\\mathcal{I}$…” should be “$C^\\mathcal{I} \\subseteq D^\\mathcal{I}$”. \n\n(2) Line 241, “The syntax and semantics for concepts in SROIQ are provided in the appendix.”---They are not in the appendix.\n\n(3) All the tables in the appendix need to be discussed. Leaving the tables alone without any analysis provides little information for the readers." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "* Is there a critical point or noise level that significantly reduces the performance of EBR? Can you provide some applicability conditions?\n* How does EBR ensure consistency between embedded representations and symbolic inference logic?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "* The paper is well organized.\n* Experiments carried out by authors are sufficient." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a novel embedded reasoning model called Embedding Based Reasoner (EBR), aimed at addressing the issues of incompleteness and inconsistency in the Knowledge Base (KB). The traditional symbolic inference engine is inefficient and not robust enough when dealing with large-scale or erroneous data KB. In this paper, the neural inference engine EBR overcomes these shortcomings by quickly approximating the inference results of the symbolic inference engine through embedding technology." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* The theoretical explanation of the method is limited: EBR uses embedded reasoning techniques, but there is insufficient detailed explanation of its theoretical basis and working principle. Lack of in-depth analysis of the consistency and interpretability of embedded models in DL semantics may affect trust in the robustness and reliability of the method.\n* Although EBR has significantly improved efficiency on large-scale datasets, there is a lack of detailed quantitative analysis of its computational resource requirements, such as memory consumption and GPU computing resources." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 1 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." 
] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "- How are complex role assertions handled by your reasoner? \n- How can you deal with the open world semantics underlying SROIQ? \n- What other approach to combining complex database queries with neural approach exist and how does your approach perform in comparison to these?" }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Combining symbolic reasoning with neural methods is a very promising approach to mitigate the known problems of the two approaches, containing the ones mentioned in the paper." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper claims to present a neural reasoner that captures the full semantics of the description logic SROIQ while scaling to large knoweldge bases. The paper discusses related work, presents the reasoning approach and evaluates the correctness of the model in three different closed world scenarios." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "The approach proposed is quite naive in my opinion: use an existing link prediction approach to answer triple queries and combine the results according to the semantics of description logics operators. This approach has a number of limitations and does not really preserve the logical semantics of SROIQ: \n\n- the operators defined in the paper seem to differ from the one's I would expect in SROIQ. for instance I missed how complex role inclusions are handled. Further, I was surprised to see the 'self' operator in the definition, also this typically makes expressive description logics undecidable \n- The reasoning seems to rely on a the closed-world assumption (the evalution uses a closed word setting), which is not the semantics of SROIQ that uses open World semantics and features real negation as well as ways to implicitly formulate negation. \n\nThe evaluation - as mentioned above - is not really suited to show that the approach preserves SROIQ semantics. The setting is much closer to complex querying over a database than logical reasoning. Given this, I miss a comparison with more database-like approach to neural symbolic reasoning, e.g. based on datalog queries." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 2 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. are the datasets publicly available? \n\n2. line 210: \"mapping of DL syntax to a neural semantic syntax\". You are mapping \" .. syntax to .. syntax\", or \".. syntax to a neural semantics\"? \n\n3. what the neural architectures of the proposed method?\n\n4. In section 3, what is the novelty in methodology?" 
}, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "This paper addresses an important issue of knowledge graph reasoning, and follows the embedding approach to deal with the incompleteness and inconsistency of knowledge graphs. The paper is very well polished, from writing to experiments." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a novel neural reasoner, dubbed EBR, reasoning over incomplete and inconsistent knowledge graphs. Authors propose a neural interpretation for the SROIQ semantics for Descriptive Logic. A substantial survey is conducted. Experiments are carried out in six datasets, with very good results -- achieving near-perfect results in the close world scenario." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "It is not clear what neural architectures are used. The datasets used in the experiments are not those used by SOTA papers. After inspecting the supplementary material, I see codes, but do not find datasets. Authors described in section 2.3 that CQD and other neural logical query methods do not support negation, universal restriction and cardinality restriction. However, Beta-E supports:\n \nBeta Embeddings for Multi-Hop Logical Reasoning in Knowledge Graphs. H. Ren, J. Leskovec. Neural Information Processing Systems (NeurIPS), 2020.\n\nAnd CQD defines the complementary t-conorm as \\bottom (x,y) = 1 - \\top(1-x, 1-y). This automatically follows ways of definitions of negation. If \\top(x,y) = min(x,y), then, -x = -\\top(x,x) = -\\top(1- (1-x), 1- (1-x)) = \\bottom(1-x, 1-x) -1. \n\nIn Table 2, the neural semantics of \\Delta^\\mathcal{I} and \\emptyset are the same as the semantics of those in Table 1." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "This paper introduces a neural reasoner that leverages embeddings to approximate symbolic reasoning in description logic, enabling concept learning from incomplete, and erroneous knowledge bases." }, "_bibtex": { "value": "@inproceedings{\nanonymous2024neural,\ntitle={Neural Description Logic Reasoning over Incomplete Knowledge Bases},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4qRCiEZGKd},\nnote={under review}\n}" }, "abstract": { "value": "Concept learning exploits background knowledge in the form of description logic axioms to learn explainable classification models from knowledge bases. Despite recent breakthroughs in the runtime of concept learners, most approaches still cannot be deployed on real-world knowledge bases. This is due to their use of description logic reasoners, which do not scale to large datasets. Moreover, these reasoners are not robust against inconsistencies and erroneous data, both being hallmarks of real datasets. We address this challenge by presenting a novel neural reasoner dubbed \\approach. Our reasoner relies on embeddings to rapidly approximate the results of a symbolic reasoner. We show that our reasoner solely requires retrieving instances for atomic concepts and existential restrictions to retrieve the instances of any concept in $\\mathcal{SROIQ}$. Importantly, our experiments also suggest that our reasoner is robust against missing and erroneous data." 
}, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "concept learning", "description logic", "knowledge bases", "neural reasoner", "embeddings", "SROIQ", "atomic concepts" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/b7f8242765d25596d603babb3295230dbcfdb06b.pdf" }, "presentation": null, "primary_area": { "value": "other topics in machine learning (i.e., none of the above)" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/edf94bc65e74abc5cb65c7e38c741a23de5cd01b.zip" }, "title": { "value": "Neural Description Logic Reasoning over Incomplete Knowledge Bases" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4qh6nurdYt
Effective Learning with Node Perturbation in Multi-Layer Neural Networks
main
Active
efficient machine learning;optimization
optimization
5;6;6
4;4;4
2;3;3
2;3;3
3;3;3
5.666667
4
2.666667
2.666667
3
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "Could you elaborate on the implementation details in convolutional networks, particularly regarding how weight sharing was handled?" }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "The manuscript is clearly written and well-motivated. Moreover, the numerical experiments convincingly demonstrate that decorrelation improves the performance of NP." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this manuscript, the authors propose an improved algorithm for node perturbation (NP) with two key modifications compared to the standard NP. First, the weight update is calculated using the total change in activity at each hidden node instead of the direct perturbation at that node. Secondly, the algorithm incorporates a decorrelation step at each layer to minimize noise correlations. The authors demonstrate numerically that decorrelation robustly enhances the performance of NP in deep neural networks trained on the CIFAR-10 dataset. They also show that using the total change in activity for updates outperforms the vanilla NP in convolutional neural networks." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "Section 2.1.2, especially L127-L128, provides an impression that INP is required to make the update unbiased. However, the vanilla node perturbation is known to converge to the true backpropagation at the infinite sample limit if the noise added to each layer is independent of each other (Fiete et al., 2007; Hiratani et al., 2022). \n\nOn the contrary, the ANP update rule is biased against back-propagation. This can be demonstrated in a one-hidden layer non-linear network $y = W_2 f (W_1 x)$ with a loss function L(y). \nGiven perturbations $h v_1$, $h v_2$, at $h \\to 0$ limit, the ANP update for the second layer is\n$$\\begin{eqnarray}\n\\Delta W_2 \n&=& \\frac{1}{h} \\langle (L(\\tilde{y}) - L(y)) (v_2 + W_2 [f'(W_1 x) \\odot v_1] ) f(W_1 x)^T \\rangle \n\\nonumber \\\\\\\\\n&=& \\langle (v_2 + W_2 [f' (W_1 x) \\odot v_1] ) (v_2 + W_2 [f' (W_1 x) \\odot v_1] )^T \\rangle \\frac{\\partial L(y)}{\\partial y} f (W_1 x)^T \n\\nonumber \\\\\\\\\n&=& (I + W_2 \\text{diag} [ f'(W_1 x)^2 ] W_2^T) \\frac{\\partial L(y)}{\\partial y} f(W_1 x)^T\n\\end{eqnarray}$$\nBecause the true gradient is \n$\\frac{\\partial L(y)}{\\partial W_2} = \\frac{\\partial L(y)}{\\partial y} f(W_1 x)^T$,\nthe ANP update rule above is biased against the true gradient, implying that the claims made in section 2.1 are inaccurate. \n\nGiven this bias, it is unclear why ANP achieves better alignment with backpropagation in Figure 2. 
Nevertheless, biased updates can sometimes facilitate faster learning, as discussed in Song, Millidge, et al., Nature Neuroscience, 2024. Therefore, the results shown in the bottom panel of Figure 4 are potentially interesting.\n\nRegarding comparison with BP, empirical results are presented in a somewhat misleading way. A key limitation of NP is its need for a low learning rate, which means performance comparisons with BP should be conducted across a wide range of learning rates." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1. **Variance and Convergence of the Loss Gradient**: Could the authors provide an analysis of the variance in the estimated loss gradient as a function of the number of samples? Additionally, what is the typical convergence rate of the loss with an increasing number of samples? This data would offer valuable insights into the scalability and efficiency of the proposed methods.\n\n2. **Impact of the Decorrelation Step**: Can the authors confirm that the observed performance improvements are not solely driven by the decorrelation rule? From Figure 4, it is unclear whether INP and ANP contribute significantly without the decorrelation step. What results would be obtained if only the decorrelation step was implemented, followed by NP applied solely to train the readouts?\n\nWhile I have additional questions, addressing these principal concerns would be pivotal in reconsidering the rating of this work." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "**Biological Motivation**: The exploration of node perturbation (NP) as an alternative to backpropagation is compelling due to its alignment with plausible biological mechanisms, negating the need for backward passes and allowing learning from reward signals. Previous work on NP suffers from poor performance and reliance on specific and accurate noise control. This work improved on previous studies by offering significant improvements that could potentially make NP a competitive framework.\n- **Innovative Formulations**: The introduction of iterative node perturbation (INP) and activity-based node perturbation (ANP) adds theoretical depth, notably linking perturbation approaches with directional derivatives and improving the stability of NP in noisy environments. In particular, the authors show that the loss gradient can be computed without precise control over the noise process. This solution is elegant and informative.\n- **Decorrelation Mechanism**: The incorporation of input decorrelation as an unsupervised learning mechanism demonstrates clear improvements in convergence speed, adding practical value to NP and its variants, while maintaining biological plausibility." 
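For concreteness, the distinction between the classical estimator and the activity-based variant praised above can be written roughly as follows (my notation, not the authors'): if the pre-activations $z_\ell$ of layer $\ell$ with input $x_\ell$ are perturbed by noise $\xi_\ell$ with $\langle \xi_\ell \xi_\ell^\top \rangle = \sigma^2 I$, and $\delta\mathcal{L}$ is the loss difference between the noisy and clean passes, then

$$\widehat{\nabla}_{W_\ell}\mathcal{L}_{\mathrm{NP}} \;\propto\; \frac{\delta\mathcal{L}}{\sigma^2}\, \xi_\ell\, x_\ell^\top, \qquad \widehat{\nabla}_{W_\ell}\mathcal{L}_{\mathrm{ANP}} \;\propto\; \frac{\delta\mathcal{L}}{\sigma^2}\, (\tilde z_\ell - z_\ell)\, x_\ell^\top,$$

i.e., NP credits each unit with the locally injected noise, whereas ANP credits it with the total change in its activity, which also includes noise propagated from earlier layers; this is what removes the need to measure the injected noise directly.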
}, "student_author": null, "submission_guidelines": null, "summary": { "value": "Backpropagation (BP) is the standard for training deep neural networks but is criticized for its lack of biological plausibility and computational complexity due to separate forward and backward phases. Node Perturbation (NP) offers an alternative approach by injecting noise into hidden layer activities and using the resulting change in the loss function to guide weight updates. However, traditional NP is inefficient, unstable, and requires precise noise control, limiting its practical utility and biological relevance.\n\nThis study extends NP by introducing more robust formulations. It reframes NP using the concept of directional derivatives, leading to an iterative approach (INP) that better aligns with BP in terms of gradient estimation. Additionally, the paper presents an activity-based variant (ANP) that estimates the gradient using differences between clean and noisy activations, thus bypassing the need for precise noise measurement. A key contribution is integrating a layer-wise input decorrelation mechanism, which mitigates the bias in NP updates and accelerates convergence.\n\nNumerical experiments demonstrate that these modified NP algorithms, particularly when combined with input decorrelation, significantly enhance performance compared to standard NP and, in some cases, approach BP-level accuracy. The study also shows that these methods can be extended to noisy systems where the noise process is not directly observable, making them applicable to both neuromorphic computing and potential models of biological learning." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. **Scalability Limitations**: While the paper suggests that scaling to larger problems could be addressed through parallelization, this solution conflicts with the biological motivation emphasized throughout the text. The authors should reconcile this discrepancy by exploring biologically feasible alternatives or clarifying the practical biological implications. Notably, this approach is clearly relevant for neuromorphic computing, particularly since the noise perturbations in this framework can be arbitrarily small.\n \n2. **Gradient Approximation**: Theoretical analyses (e.g., Equation 4) focus on mean gradients. Still, the role of noise variance and the number of noisy samples in the stability and efficiency of gradient estimates are underexplored. Since the authors emphasize the framework's efficiency, claiming high performance can be achieved with a small number of noisy passes, the typical, rather than the mean loss gradient, should be analyzed. \n\n3. **Decorrelation Analysis**: Adding an unsupervised learning rule for input decorrelation in each layer is an intriguing and potentially beneficial approach. However, the paper’s analysis of this aspect is insufficient, both numerically and theoretically. The improvement observed from decorrelating inputs, as demonstrated in Figure 3, is unsurprising. In a single-layer architecture, this step functions similarly to an additional linear transformation or data preprocessing step, which is expected to yield performance gains. This effect diminishes the novelty of the finding.\n Furthermore, while Figure 4 shows notable improvements in BP training accuracy with decorrelation, the minimal test accuracy gains suggest overfitting, indicating that the method primarily accelerates convergence without enhancing generalization. 
This point needs further exploration to determine the trade-offs between train and test performance. Moreover, Figure 4 highlights that input decorrelation does not enhance—and may even degrade—performance in convolutional networks. This discrepancy calls for a more thorough investigation into the conditions under which decorrelation aids or hinders performance. The authors should address these limitations and clarify whether decorrelation consistently benefits deeper and more complex architectures, or if its effectiveness is limited to simpler cases.\n \n4. **Learning in the deep hidden layers**: Figure 2 indicates that the gradient alignment in the output weights closely matches that of BP, which may be due to the low-dimensional nature of the output space. This raises a concern that the observed performance, which falls short of BP’s, could be primarily driven by the readout weights, potentially bolstered by the unsupervised decorrelation step applied to the layer activities. This implies that the NP algorithms may contribute minimally to learning in the deeper layers. \n To address this issue, the authors should provide evidence that ANP/INP enhances learning throughout the network, rather than merely acting as a support for the final readout layer. Specifically, they should demonstrate that these algorithms outperform a simpler baseline approach involving unsupervised learning in the hidden layers followed by SGD or NP for training the output layer.\n### Minor Comments\n- **Appendix Clarifications**: The derivations in Appendix C do not add significant new insights beyond the main text and should be expanded to include formal proofs that strengthen the theoretical claims made in the main manuscript.\n- **Clarification of Sample Averaging**: The use of a noise direction vector $v$ for each sample should be clearly articulated to explain how this averaging over noise directions ensures accurate gradient approximation." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "These questions/suggestions mostly add on top of what I mentioned in the weaknesses:\n* What happens to the update angle during training? Do you observe a better alignment close to convergence? If this is interesting, it could be added to Figure 2.\n* I am pretty surprised by the results with a fully connected network trained with BP, as it is pretty common to see these networks obtaining >60% accuracy on CIFAR-10. Could you elaborate more on the choice of hyperparameters?\n* I believe it is always important to include a code repository in these papers as it helps other researchers in the field and makes the experiments easier to reproduce." 
}, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "I believe the paper is well structured and well written, the results are interesting and overall well defended. In detail:\n* The methods section is clear and straightforward. I appreciated the incremental structure that starts from Node Perturbation ad adds on the newly proposed variations explaining well the contribution of each piece.\n* The results prove the claims of the authors regarding how each variation of NP compares, and I appreciate using Tiny ImageNet as a benchmark which is more challenging of what is usually found in these types of papers.\n* I found section 3.4 very interesting, as most methods I know of rely on having non-noisy systems. I think removing the assumption of having a clean and noisy pass, and assuming all passes are noisy makes it a very interesting algorithm." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "In this paper, the authors extend an existing framework to train multi-layer neural networks without using backpropagation (BP). Node perturbation (NP) relies on injecting noise in the network layers, measuring the in the loss differential between the clean and noisy pass, and computing a layer-wise update which relies on the noise vector, the pre-synaptic activity and the loss differential. The authors improve on the NP framework with three main contributions:\n* In the traditional NP approach, the effect of the layer $\\ell$ noise $\\epsilon_\\ell$ on the downstream layers is unaccounted for. By computing the directional derivative of the loss on the noise injected at layer $\\ell$ ($\\nabla_v \\mathcal{L}$), they can more precisely target the updates to layer $\\ell$. This method, referred to as iterative node perturbation, requires $L+1$ forward passes, and relies on access to the noise vector.\n* Next, the authors propose activity-based noise perturbation (ANP). This approach relies on the assumption that all the layers are independent, which requires measuring the state difference between clean and noisy passes instead of the noise injected. This requires only two passes and does not require access to the noise signal, but rather its effect on the network.\n* Lastly, using an existing trainable decorrelation procedure, they show improved performance of their proposed algorithms by decorrelating the inputs to each layer.\n\nThese variations of NP are tested fully connected and convolutional networks on CIFAR-10 and Tiny ImageNet and compared to BP." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "I am giving this paper a 6 because of the following weaknesses I found, but I would be happy to re-evaluate if these concerns are addressed. My main concerns are:\n* The world bioplausible is mentioned throughout the papers, but details on how these algorithms could be implemented in biological neural networks are not provided. I believe the paper still stands without needing to justify it as bioplausible, so I believe that either removing the bioplausibility aspect (and exchange it with more details on possible hardware implementation) or providing more details about the bioplausibility would be better alternatives.\n* In the last years, many alternatives to back propagation that rely on multiple forward passes have been proposed. 
In particular, looking at equation (6) in the paper. For example, Dellaferrera and Kreiman, 2022, proposes to use the differential between a clean and \"noisy pass\" to train the network, where the noisy pass relies on a perturbation of the activity computed using the loss differential. Since there are many other algorithms like this (for example Hinton 2022, Kohan et al. 2018, Frenkel et al 2021), I believe a comparison in the methods section would be beneficial.\n* The figures position could be improved to make the paper more readable. For example Figure 1 could be pushed up in the paper as it summarizes the contributions of the paper, Figure 4 could be moved in section 3.3.\n* I think figure 2 could be improved. For example figures 2 right does not add anything that cannot be explained in words." }, "withdrawal_confirmation": null }, { "TLDR": { "value": "Input decorrelation and a mathematical reformulation of the update rule can greatly increase the convergence speed of Node Perturbation" }, "_bibtex": { "value": "@inproceedings{\nanonymous2024effective,\ntitle={Effective Learning with Node Perturbation in Multi-Layer Neural Networks},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4qh6nurdYt},\nnote={under review}\n}" }, "abstract": { "value": "Backpropagation (BP) remains the dominant and most successful method for training parameters of deep neural network models.\nHowever, BP relies on two computationally distinct phases, does not provide a satisfactory explanation of biological learning, and can be challenging to apply for training of networks with discontinuities or noisy node dynamics.\nBy comparison, node perturbation (NP) proposes learning by the injection of noise into network activations, and subsequent measurement of the induced loss change. NP relies on two forward (inference) passes, does not make use of network derivatives, and has been proposed as a model for learning in biological systems.\nHowever, standard NP is highly data inefficient and unstable due to its unguided noise-based search process.\nIn this work, we investigate different formulations of NP and relate it to the concept of directional derivatives as well as combining it with a decorrelating mechanism for layer-wise inputs.\nWe find that a closer alignment with directional derivatives together with input decorrelation at every layer strongly enhances performance of NP learning with large improvements in parameter convergence and much higher performance on the test data, approaching that of BP.\nFurthermore, our novel formulation allows for application to noisy systems in which the noise process itself is inaccessible." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." }, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "efficient machine learning", "optimization" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." 
}, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/5abbce4339441d08a7ef1381799844fc3421c055.pdf" }, "presentation": null, "primary_area": { "value": "optimization" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Effective Learning with Node Perturbation in Multi-Layer Neural Networks" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4qygYXJc0V
Accurate Forgetting for All-in-One Image Restoration Model
main
Withdraw
Image Restoration; Privacy Protection
applications to computer vision, audio, language, and other modalities
Xin Su;Zhuoran Zheng
~Xin_Su5;~Zhuoran_Zheng1
0
0
0
0
0
0
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": null, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": null, "primary_area": null, "questions": null, "rating": null, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": null, "summary": null, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": null, "withdrawal_confirmation": { "value": "I have read and agree with the venue's withdrawal policy on behalf of myself and my co-authors." } }, { "TLDR": null, "_bibtex": { "value": "@misc{\nsu2024accurate,\ntitle={Accurate Forgetting for All-in-One Image Restoration Model},\nauthor={Xin Su and Zhuoran Zheng},\nyear={2024},\nurl={https://openreview.net/forum?id=4qygYXJc0V}\n}" }, "abstract": { "value": "Privacy protection has always been an ongoing topic, especially for AI. Currently, a low-cost scheme called Machine Unlearning forgets the private data remembered in the model. Specifically, given a private dataset and a trained neural network, we need to use e.g. pruning, fine-tuning, and gradient ascent to remove the influence of the private dataset on the neural network. Inspired by this, we try to use this concept to bridge the gap between the fields of image restoration and security, creating a new research idea. We propose the scene for the All-In-One model (a neural network that restores a wide range of degraded information), where a given dataset such as haze, or rain, is private and needs to be eliminated from the influence of it on the trained model. Notably, we find great challenges in this task to remove the influence of sensitive data while ensuring that the overall model performance remains robust, which is akin to directing a symphony orchestra without specific instruments while keeping the playing soothing. Here we explore a simple but effective approach: Instance-wise Unlearning through the use of adversarial examples and gradient ascent techniques. Our approach is a low-cost solution compared to the strategy of retraining the model from scratch, where the gradient ascent trick forgets the specified data and the performance of the adversarial sample maintenance model is robust. Through extensive experimentation on two popular unified image restoration models, we show that our approach effectively preserves knowledge of remaining data while unlearning a given degradation type." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": { "value": [ "~Xin_Su5", "~Zhuoran_Zheng1" ] }, "authors": { "value": [ "Xin Su", "Zhuoran Zheng" ] }, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Image Restoration; Privacy Protection" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": { "value": "su|accurate_forgetting_for_allinone_image_restoration_model" }, "pdf": null, "presentation": null, "primary_area": { "value": "applications to computer vision, audio, language, and other modalities" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": null, "title": { "value": "Accurate Forgetting for All-in-One Image Restoration Model" }, "venue": { "value": "ICLR 2025 Conference Withdrawn Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Withdrawn_Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]
4rEI2JdHH6
Let Me Grok for You: Accelerating Grokking via Embedding Transfer from a Weaker Model
main
Active
Grokking;feature learning;deep learning theory
learning theory
3;3;5;5;6
4;3;3;4;3
2;3;2;3;3
2;2;3;3;2
4;3;2;2;4
4.4
3.4
2.6
2.4
3
-0.272166
[ { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "* Could you comment on why you chose a weak model with 3 neurons in Section 3.2? Would the story change if you had 4 of them and achieve perfect accuracy? My impression is that the delayed generalization mostly happens when you are heavily over-parameterized and can very quickly memorize all the training data in early epochs, which still wouldn't happen with 4 neurons. Also, would the transfer still work if the weak model had fewer than 3 neurons?\n\n* For the modular addition/multiplication problems, what would be the smallest weak model that can lead to good transferrable embeddings (in practice, but also in theory with an appropriate construction?)\n\n* For Theorem 3.2, is this in a kernel regime? Would training only the second layer instead give similar guarantees? Regardless, a brief discussion after the statement would be a good addition.\n\nOther, minor:\n* Figure 4: Should I infer from the caption that the test accuracy becomes high around the same point (near 100 epochs), while the larger model also displays the property that training accuracy is high very quickly? Also, is it reasonable to say that the small model does not exhibit grokking here? Please clarify these points since they seem important for the story.\n* L 298: Do you mean that whenever the test accuracy is around 75% or above, you found empirically that the neurons consist of three such features? Please rephrase as this is not very clear." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "Grokking is a surprising phenomenon that leads to seemingly unpredictable outcomes. The proposed GrokTransfer method seems like an empirically effective to mitigate this. The theoretical claims also justify why good embeddings obtained from a small model can help accelerate generalization in a larger model. Overall, this makes the work interesting and significant." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper studies ways to reduce the \"delayed generalization\" in grokking, by first learning embeddings in a smaller, faster model where generalization isn't delayed, and then transferring the embeddings to a larger model. This is shown empirically in various examples including modular addition, multiplication and parity with fully-connected nets or transformers. The authors also show theoretically that fast generalization occurs in XOR when applying the GrokTransfer technique on top of a weak solution that was found with only 3 neurons (this solution is shown to be found empirically)." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "* The theory on XOR would benefit from a better understanding of the \"weaker model\", which is currently mostly empirical. 
It would be great to have even partial results for this part, even in a simpler setting (e.g. training a single neuron on this data might be tractable and similar to previous work?)\n\n* The presentation could be improved in various ways, in particular the notion of \"small model\" considered isn't very precisely defined and would benefit from clarification. Other aspects that could be discussed further include trade-offs between width and iterations needed to generalize: it seems that smaller models reduce the \"delayed generalization\" but also slow down convergence in terms of epochs. Perhaps this changes when looking at \"compute/flops\", and it would be good to include such plots.\n\nSee also the questions below." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 3 }, "primary_area": null, "questions": { "value": "1) In section 3.2, could you add more details on getting the SNR with different embeddings?\n\n2) The intuition on why the data embedding makes a difference to the generalization speed is still not very clear. \n\nIf we take all layers beyond the last linear layer (which includes the data embedding layer and some hidden layers) as doing feature engineering, as mentioned in [1], will this claim (in section 3.2, ``with this new embedding, data points are well separated in a three-dimensional space with a relatively high signal-to-noise ratio (SNR) compared to the original embedding'') still hold if we pass this data embedding into the above hidden layers? Does the separation among the feature vectors obtained from the last hidden layer make more sense than the one among the data embeddings?\n\n\n\n[1] Papyan, V., Han, X. Y., & Donoho, D. L. (2020). Prevalence of neural collapse during the terminal phase of deep learning training. Proceedings of the National Academy of Sciences, 117(40), 24652-24663." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "1) This paper is clearly written and the idea is easy to follow.\n2) The choice of the task (XOR) and the setting of the model (very simple network) is easy to interpret for the purpose of the finding of this paper." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper proposes a method to accelerate grokking via embedding transfer from a weak model. The authors first observe that the data embedding plays a crucial role in determining whether generalization is delayed. Then they initialize the target, stronger model with the embedding weights extracted from training a smaller, weaker model. Finally, the authors give a rigorous proof on a synthetic XOR task with a simple network setting, and provide empirical studies showing the effectiveness of the proposed GrokTransfer method on fully-connected networks and transformers." 
}, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1) the key observation which is that the data embedding plays a crucial role in the generalization delay is not well studied. As the proposed accelerating method is based on this observation, it would be better to provide more evidence to say this observation is indeed the correct one. In addition to the modular addition task, providing more results on other tasks would make this observation more convincing. Also, instead of data embedding layer only, is the initialization of other layers a possible reason to cause the delay? An ablation study on other possible reasons to cause the delay would be better.\n\n2) In addition to simple settings, verification on more complicated tasks would make the finding more general and solid.\n\n3) In the theoretical analysis, the proof seems a little bit limited because the choice of very simple setting.\n\n4) It would be better if the authors make a clearer study on how to choose the \"weaker and smaller\" model to get the acceleration with good test performance." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "- Would a “smoother” version of the proposed method work even better? For example, one could start from a single embedding layer with a small number of neurons and gradually deepen/widen it, i.e., adding more layers and adding more neurons to each layer." }, "rating": { "value": 6 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 3 }, "strengths": { "value": "- The proposed method is interesting and novel to my knowledge. It first trains an under-parameterized model that is incapable of perfectly interpolating the data, and then uses the learned data embedding to facilitate the grokking of an over-parameterized model. It’s like approaching a problem by first crafting a simple but general solution that works well for most cases and then refining the solution to cover all the rest of the cases. Intuitively, this eliminates a lot of unnecessary competition among various not-so-simple solutions in an over-parameterized model, hence accelerating the learning process.\n- The paper is very well written. It includes an extensive discussion of the related work, a clear presentation of the motivation behind the proposed method, and a detailed case study that explains how and why the method works." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "This paper investigates \"grokking,\" a phenomenon where neural networks initially struggle to generalize, only to suddenly achieve near-perfect generalization after extended training. To accelerate generalization, the authors propose GrokTransfer. 
GrokTransfer leverages the embedding from a smaller, preliminary model that achieves moderate test performance, transferring it to a stronger target model to speed up generalization. This approach successfully eliminates delayed generalization on a synthetic XOR task and performs well across various algorithmic tasks on both fully connected networks and Transformers. The results suggest that GrokTransfer could reshape training dynamics, enabling models to generalize without delay." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- It is unclear how well the method would generalize and scale to more complicated problems where such acceleration can make a real impact. The algorithmic problems are useful for analysis but are too simple in comparison with real-world problems which often involve high-dimensional inputs. For high-dimensional inputs with many redundant features, it is likely for the weaker model to lock onto degenerate solutions that would hinder the stronger model from grokking. It would be interesting to see if this is really the case or not.\n- The paper does not provide much insight on how to design or choose the weaker model. Clearly, the weaker model can neither be too weak nor too strong, i.e. there is a trade-off. If it is too weak, the solution may degenerate and thus make it harder for the stronger model to grok. If the weaker model is too strong, then it may take too much time to train the weaker model. Therefore, for this method to be truly useful, there should be some general rule of thumb for choosing the weaker model, otherwise, much time could be wasted on trial and error.\n- As the authors have mentioned in the paper, the theoretical result only considers a relatively simple XOR task. There does not seem to be any clear indication if the analysis could potentially be applied to more general problems. Therefore, the significance of this result is in doubt." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 3 }, "contribution": { "value": 3 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 2 }, "primary_area": null, "questions": { "value": "1. Regarding W1, the FNN->transformer setting was tested on only one task. I would like to see GrokTransfer's performance on more algorithmic tasks in this setting, especially in comparison to GrokFast. According to Table 1, training is very fast with GrokTransfer, so it should be quick to perform experiments on additional tasks (including those not tested in the paper). \n\n2. Grokking is known to occur beyond algorithmic data [1], which is already cited in the paper. I would like to see how GrokTransfer performs on non-algorithmic tasks, such as image classification with MNIST, as explored in [1].\n\n3. A new paper [2] has been recently released on accelerating grokking through weight transfer based on the Kolmogorov-Arnold (KA) representation theorem. Your approach seems simpler, but the approach in [2] can even handle two non-standard arithmetic tasks—composition of operations and systems of equations. 
How does GrokTransfer compare to [2], in terms of theoretical basis, performance, and practicality?\n\n[1] Ziming Liu, Eric J Michaud, and Max Tegmark. Omnigrok: Grokking beyond algorithmic data. In The Eleventh International Conference on Learning Representations, 2023.\n\n[2] Yeachan Park, Minseok Kim, & Yeoneung Kim. Acceleration of Grokking in Learning Arithmetic Operations via Kolmogorov-Arnold Representation. arXiv preprint arXiv:2405.16658, 2024\n\nMinor\n- Typo at line 255: “need [to] learn”\n- There are reference mistakes in the paper, concerning arXiv citations that should point to conference proceedings. Here is an example of wrong references:\n\nTanishq Kumar, Blake Bordelon, Samuel J Gershman, and Cengiz Pehlevan. Grokking as the transition from lazy to rich training dynamics. arXiv preprint arXiv:2310.06110, 2023.\n(Published in ICLR 2024)\n\nNote: the minor points did not influence my final score." }, "rating": { "value": 5 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "1. The authors provide a sensible justification for the core idea of their method through a motivating experiment in Section 2.1. \n2. They demonstrate its effectiveness both empirically and theoretically, highlighting improvements in computational costs and performance.\n3. The proposed method is simple and straightforward, and if it can generalize to broader tasks and architectures, it has significant potential." }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces GrokTransfer, a method that expedites grokking by transferring embeddings from a weaker model. Through a simple XOR classification task, the authors offer both theoretical and empirical justification for GrokTransfer. Experiments on other algorithmic tasks show its effectiveness in embedding transfer from FNN to FNN/transformers." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "1. The authors tested three algorithmic tasks—modular addition, modular multiplication, and the (40,3)-parity task—for FNN->FNN transfers. However, they provided results for only one task (modular addition) in the FNN->transformers setting. Given that FNN->transformer transfers are more practical and high in potential, there seems to be no reason to exclude modular multiplication and the (40,3)-parity task. Without these experiments, I can't help but think that the generalizability of GrokTransfer is limited." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": null, "abstract": null, "anonymous_url": null, "authorids": null, "authors": null, "code_of_conduct": { "value": "Yes" }, "code_of_ethics": null, "comment": null, "confidence": { "value": 4 }, "contribution": { "value": 2 }, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": { "value": [ "No ethics review needed." ] }, "keywords": null, "large_language_models": null, "no_acknowledgement_section": null, "other_comments_on_LLMs": null, "paperhash": null, "pdf": null, "presentation": { "value": 4 }, "primary_area": null, "questions": { "value": "- Do the conditions in the theoretical analysis, under which grokking is mitigated for the XOR task, have any implications on what are the causes of grokking? 
For instance, it possibly suggests several causes of grokking, including the SNR, overparametrization (A5), initialization (A3), etc. \n- Are there failure cases of the method? I do not see mentions of them in the limitations." }, "rating": { "value": 3 }, "reciprocal_reviewing": null, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": { "value": 2 }, "strengths": { "value": "- The method is clearly described and easily understandable and implementable.\n- The combined time for the proposed method appears much faster than the baseline method of training the original target model from scratch. Table 1 suggests it is more than 5x faster. However, ablations are needed to determine whether the speedup comes from architectural modifications, or from the proposed method.\n- For the tasks studied, the model performs well in terms of reducing grokking and accelerating convergence (although comparisons are not apples-to-apples -- see Weakness 1)" }, "student_author": null, "submission_guidelines": null, "summary": { "value": "The paper introduces a method to mitigate grokking, which refers to the scenario in which models overfit to the training dataset long before they begin to generalize to unseen test data. This is done by training a smaller model with a much lower embedding (number of features in first layer) dimension, which converges more quickly and without grokking. The architecture of the original model is then modified by replacing the first embedding layer with the product of two matrices $E_T = AB$ where $A$ and $B$ are of much lower rank. $A$ is initialized with the embedding layer of the smaller model. This model is evaluated on several synthetic tasks -- modular addition, modular multiplication, and a (40,3)-parity task defined as learning $y = \Pi_{i \in \{1,2,3\}} x_i$ where $x \in \{\pm 1\}^{40}$. Experiments show that compared to the original model that is randomly initialized, the resulting model mitigates grokking and converges faster. Theoretical analysis is also provided for a specific case involving learning $y = x_1$ XOR $x_2$ for an 80K-dimensional vector $x$." }, "supplementary_material": null, "title": null, "venue": null, "venueid": null, "weaknesses": { "value": "- My main concern is that all the numbers reported for GrokTransfer and the target model (Large) are not directly/fairly comparable. The model used for GrokTransfer parameterizes the first layer with $E_T = AB$ where $A$ and $B$ are (very) low-rank matrices. On the other hand, the original target model uses the full $d_v \times d_T$ matrix which is significantly larger. The number of parameters present in this layer is now much larger than that of $E_T = AB$. A fair comparison should not even consider target models with the full $d_v \times d_T$ embedding layer, but only those parametrized with $AB$, where, for instance, $A$ can be randomly initialized instead of being initialized from the weaker model. As such, it is difficult to conclude anything regarding whether the improvement comes from weak model initialization, or simply from architectural modifications, from any of the existing results.\n- This defeats the purpose of studying/mitigating grokking in the first place, where the goal is to be able to train over-parametrized models with minimal grokking. 
This method instead replaces the over-parametrized model with one of more manageable complexity, which ties to the next weakness\n- Method also greatly reduces the expressivity of the original target model, by adding a very low-rank bottleneck to the first layer. While this works for the synthetic closed-form tasks considered in the paper, it suggests that (1) the original target model architecture is clearly ill-suited, since it converges perfectly even after introducing a very low-rank bottleneck, and (2) ability of the method to generalize to more complex tasks which require greater expressivity is limited and questionable.\n- The general motivation of the method appears to be applying dimensionality reduction to pre-process inputs that have very low SNR. This opens up questions regarding how the method compares to classical techniques like LDA?\n- Method introduces an additional layer of complexity in the training pipeline, due to having to train an additional model which would require its own architectural and hyperparameter sweeps.\n- Minor comments on notation: Inconsistent notation switching between $d_{embed}$ and $d_W$ / $d_F$, which makes it hard to look up. Notation on line $y=x_1 x_2$ is also ambiguous, I assume the multiplication operation here is XOR. Also overloaded notation for $p$ for modulo and dimension of input in XOR task." }, "withdrawal_confirmation": null }, { "TLDR": null, "_bibtex": { "value": "@inproceedings{\nanonymous2024let,\ntitle={Let Me Grok for You: Accelerating Grokking via Embedding Transfer from a Weaker Model},\nauthor={Anonymous},\nbooktitle={Submitted to The Thirteenth International Conference on Learning Representations},\nyear={2024},\nurl={https://openreview.net/forum?id=4rEI2JdHH6},\nnote={under review}\n}" }, "abstract": { "value": "''Grokking'' is a phenomenon where a neural network first memorizes training data and generalizes poorly, but then suddenly transitions to near-perfect generalization after prolonged training. While intriguing, this delayed generalization phenomenon compromises predictability and efficiency. Ideally, models should generalize directly without delay. To this end, this paper proposes GrokTransfer, a simple and principled method for accelerating grokking in training neural networks, based on the key observation that data embedding plays a\ncrucial role in determining whether generalization is delayed. GrokTransfer first trains a smaller, weaker model to reach a nontrivial (but far from optimal) test performance. Then, the learned input embedding from this weaker model is extracted and used to initialize the embedding in the target, stronger model. We rigorously prove that, on a synthetic XOR task where delayed generalization always\noccurs in normal training, GrokTransfer enables the target model to generalize directly without delay. Moreover, we demonstrate that, across empirical studies of different tasks, GrokTransfer effectively reshapes the training dynamics and eliminates delayed generalization, for both fully-connected neural networks and Transformers." }, "anonymous_url": { "value": "I certify that there is no URL (e.g., github page) that could be used to find authors’ identity." }, "authorids": null, "authors": null, "code_of_conduct": null, "code_of_ethics": { "value": "I acknowledge that I and all co-authors of this work have read and commit to adhering to the ICLR Code of Ethics." 
}, "comment": null, "confidence": null, "contribution": null, "desk_reject_comments": null, "details_of_ethics_concerns": null, "flag_for_ethics_review": null, "keywords": { "value": [ "Grokking", "feature learning", "deep learning theory" ] }, "large_language_models": null, "no_acknowledgement_section": { "value": "I certify that there is no acknowledgement section in this submission for double blind review." }, "other_comments_on_LLMs": null, "paperhash": null, "pdf": { "value": "/pdf/d395e4d898f4e5b63a41a76babcc7893177cb8f1.pdf" }, "presentation": null, "primary_area": { "value": "learning theory" }, "questions": null, "rating": null, "reciprocal_reviewing": { "value": "I understand the reciprocal reviewing requirement as described on https://iclr.cc/Conferences/2025/CallForPapers. If none of the authors are registered as a reviewer, it may result in a desk rejection at the discretion of the program chairs. To request an exception, please complete this form at https://forms.gle/Huojr6VjkFxiQsUp6." }, "resubmission": null, "revert_desk_rejection_confirmation": null, "revert_withdrawal_confirmation": null, "soundness": null, "strengths": null, "student_author": null, "submission_guidelines": { "value": "I certify that this submission complies with the submission instructions as described on https://iclr.cc/Conferences/2025/AuthorGuide." }, "summary": null, "supplementary_material": { "value": "/attachment/488c46d1ad77d0936a1b29bb6b15e04f3e10aa99.zip" }, "title": { "value": "Let Me Grok for You: Accelerating Grokking via Embedding Transfer from a Weaker Model" }, "venue": { "value": "ICLR 2025 Conference Submission" }, "venueid": { "value": "ICLR.cc/2025/Conference/Submission" }, "weaknesses": null, "withdrawal_confirmation": null } ]