You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
title={Edge-{LLM}s: Edge-Device Large Language Model Competition},
92
+
author={Shiwei Liu and Kai Han and Adriana Fernandez-Lopez and Ajay Kumar Jaiswal and Zahra Atashgahi and Boqian Wu and Edoardo Ponti and Callie Hao and Rebekka Burkholz and Olga Saukh and Lu Yin and Andreas Zinonos and Tianjin Huang and Jared Tanner and Yunhe Wang},
93
+
booktitle={NeurIPS 2024 Competition Track},
94
+
year={2024},
95
+
url={https://openreview.net/forum?id=jeCMRoIn15}
96
+
}
97
+
89
98
@inproceedings{
90
99
mustafa2024gate,
91
100
title={{GATE}: How to Keep Out Intrusive Neighbors},
@@ -137,6 +146,7 @@ @inproceedings{
137
146
img={are-gats-out-of-balance.png}
138
147
}
139
148
149
+
140
150
@InProceedings{pmlr-v202-gadhikar23a,
141
151
title = {Why Random Pruning Is All We Need to Start Sparse},
142
152
author = {Gadhikar, Advait Harshal and Mukherjee, Sohom and Burkholz, Rebekka},
title={The Network Zoo: a multilingual package for the inference and analysis of gene regulatory networks},
193
+
journal={Genome Biology},
194
+
year={2023},
195
+
day={09},
196
+
volume={24},
197
+
number={1},
198
+
pages={45},
199
+
abstract={Inference and analysis of gene regulatory networks (GRNs) require software that integrates multi-omic data from various sources. The Network Zoo (netZoo; netzoo.github.io) is a collection of open-source methods to infer GRNs, conduct differential network analyses, estimate community structure, and explore the transitions between biological states. The netZoo builds on our ongoing development of network methods, harmonizing the implementations in various computing languages and between methods to allow better integration of these tools into analytical pipelines. We demonstrate the utility using multi-omic data from the Cancer Cell Line Encyclopedia. We will continue to expand the netZoo to incorporate additional methods.},
200
+
issn={1474-760X},
201
+
doi={10.1186/s13059-023-02877-1},
202
+
url={https://doi.org/10.1186/s13059-023-02877-1},
203
+
img={netzoo2.png}
204
+
}
205
+
206
+
@article{10.1093/nar/gkac1157,
207
+
author = {Shutta, Katherine H and Weighill, Deborah and Burkholz, Rebekka and Guebila, Marouen Ben and DeMeo, Dawn L and Zacharias, Helena U and Quackenbush, John and Altenbuchinger, Michael},
208
+
title = {DRAGON: Determining Regulatory Associations using Graphical models on multi-Omic Networks},
209
+
journal = {Nucleic Acids Research},
210
+
volume = {51},
211
+
number = {3},
212
+
pages = {e15-e15},
213
+
year = {2022},
214
+
abstract = {The increasing quantity of multi-omic data, such as methylomic and transcriptomic profiles collected on the same specimen or even on the same cell, provides a unique opportunity to explore the complex interactions that define cell phenotype and govern cellular responses to perturbations. We propose a network approach based on Gaussian Graphical Models (GGMs) that facilitates the joint analysis of paired omics data. This method, called DRAGON (Determining Regulatory Associations using Graphical models on multi-Omic Networks), calibrates its parameters to achieve an optimal trade-off between the network’s complexity and estimation accuracy, while explicitly accounting for the characteristics of each of the assessed omics ‘layers.’ In simulation studies, we show that DRAGON adapts to edge density and feature size differences between omics layers, improving model inference and edge recovery compared to state-of-the-art methods. We further demonstrate in an analysis of joint transcriptome - methylome data from TCGA breast cancer specimens that DRAGON can identify key molecular mechanisms such as gene regulation via promoter methylation. In particular, we identify Transcription Factor AP-2 Beta (TFAP2B) as a potential multi-omic biomarker for basal-type breast cancer. DRAGON is available as open-source code in Python through the Network Zoo package (netZooPy v0.8; netzoo.github.io).},
title={An online notebook resource for reproducible inference, analysis and publication of gene regulatory networks},
270
+
journal={Nature Methods},
271
+
year={2022},
272
+
day={01},
273
+
volume={19},
274
+
number={5},
275
+
pages={511-513},
276
+
issn={1548-7105},
277
+
doi={10.1038/s41592-022-01479-2},
278
+
url={https://doi.org/10.1038/s41592-022-01479-2},
279
+
abstract={Open access to software in computational and systems biology, including data, code and models, is widely acknowledged as essential for ensuring reproducibility of research results and reuse of methods1. Although there are software tools that allow sharing of computational pipelines, these systems generally do not allow the integration of software annotation and documentation at each step in the process — elements that are required to understand and run complex and rapidly evolving software, including methods developed in systems biology for inferring biological pathways. Our research team has been developing network inference and analysis methods, collected into the Network Zoo (http://netzoo.github.io), with implementations in R, C, MATLAB and Python. The growing community of users of these network resources, the increasing interest in learning how to apply network inference methods, and the need to ensure that published analyses are fully reproducible led us to develop Netbooks (http://netbooks.networkmedicine.org), a hosted collection of Jupyter notebooks that provide detailed and annotated step-by-step case studies of GRN analysis.},
280
+
img={nature-methods.jpg}
281
+
}
282
+
283
+
@misc{fischer2022lotteryticketsnonzerobiases,
  title         = {Lottery Tickets with Nonzero Biases},
  author        = {Fischer, Jonas and Gadhikar, Advait and Burkholz, Rebekka},
  year          = {2022},
  eprint        = {2110.11150},
  archivePrefix = {arXiv},
  primaryClass  = {cs.LG},
  url           = {https://arxiv.org/abs/2110.11150},
}
292
+
188
293
@inproceedings{
189
294
fischer2022plant,
190
295
title={Plant 'n' Seek: Can You Find the Winning Ticket?},
@@ -195,6 +300,7 @@ @inproceedings{
195
300
pdf={https://openreview.net/pdf?id=9n9c8sf0xm},
196
301
abstract={The lottery ticket hypothesis has sparked the rapid development of pruning algorithms that aim to reduce the computational costs associated with deep learning during training and model deployment. Currently, such algorithms are primarily evaluated on imaging data, for which we lack ground truth information and thus the understanding of how sparse lottery tickets could be. To fill this gap, we develop a framework that allows us to plant and hide winning tickets with desirable properties in randomly initialized neural networks. To analyze the ability of state-of-the-art pruning to identify tickets of extreme sparsity, we design and hide such tickets solving four challenging tasks. In extensive experiments, we observe similar trends as in imaging studies, indicating that our framework can provide transferable insights into realistic problems. Additionally, we can now see beyond such relative trends and highlight limitations of current pruning methods. Based on our results, we conclude that the current limitations in ticket sparsity are likely of algorithmic rather than fundamental nature. We anticipate that comparisons to planted tickets will facilitate future developments of efficient pruning algorithms.},
abstract={The lottery ticket hypothesis conjectures the existence of sparse subnetworks of large randomly initialized deep neural networks that can be successfully trained in isolation. Recent work has experimentally observed that some of these tickets can be practically reused across a variety of tasks, hinting at some form of universality. We formalize this concept and theoretically prove that not only do such universal tickets exist but they also do not require further training. Our proofs introduce a couple of technical innovations related to pruning for strong lottery tickets, including extensions of subset sum results and a strategy to leverage higher amounts of depth. Our explicit sparse constructions of universal function families might be of independent interest, as they highlight representational benefits induced by univariate convolutional architectures.},
Copy file name to clipboardExpand all lines: _data/news.yml
+3-3Lines changed: 3 additions & 3 deletions
Original file line number
Diff line number
Diff line change
@@ -14,7 +14,7 @@
14
14
headline: "Rebekka is at [CPAL](https://cpal.cc/spotlight_track/) presenting three [papers](/publications) as recent spotlights."
15
15
16
16
- date: 13. February 2025
17
-
headline: "Celia is presenting her work on graph rewiring at Cohere Labs ([see talk here](/media/#celia-rubio-madrigal--cohere-labs-feb-13-2025))."
17
+
headline: "Celia is presenting her work on graph rewiring at Cohere Labs ([watch talk here](/outreach/#celia-rubio-madrigal--cohere-labs-feb-13-2025))."
18
18
19
19
- date: 22. January 2025
20
20
headline: "Two papers
@@ -36,7 +36,7 @@
36
36
headline: "Welcome to Chao, Rahul, and Dong!"
37
37
38
38
- date: 14. June 2024
39
-
headline: "Celia, Advait and Adarsh are presenting at the Helmholtz AI Conference: AI for Science ([HAICON](https://eventclass.it/haic2024/scientific/external-program/session?s=S-05a)) in Düsseldorf ([see talk here](/media/#celia-rubio-madrigal--haicon-jun-14-2024))."
39
+
headline: "Celia, Advait and Adarsh are presenting at the Helmholtz AI Conference: AI for Science ([HAICON](https://eventclass.it/haic2024/scientific/external-program/session?s=S-05a)) in Düsseldorf ([watch talk here](/outreach/#celia-rubio-madrigal--haicon-jun-14-2024))."
40
40
41
41
- date: 1. May 2024
42
42
headline: "Our paper on [improving GATs](https://openreview.net/forum?id=Sjv5RcqfuH) has been accepted at ICML 2024."
@@ -54,7 +54,7 @@
54
54
headline: "Our paper on [balancing GATs](https://openreview.net/forum?id=qY7UqLoora) has been accepted at NeurIPS 2023."
55
55
56
56
- date: 5. September 2023
57
-
headline: "We have received an ERC starting grant: [SPARSE-ML](https://cispa.de/en/erc-burkholz)."
57
+
headline: "We have received an ERC starting grant: [SPARSE-ML](https://cispa.de/en/research/grants/sparse-ml)."
58
58
59
59
- date: 24. April 2023
60
60
headline: "Our paper on [random pruning](https://openreview.net/forum?id=cKYIyT9wvo) has been accepted at ICML 2023."
abstract: "Deep learning continues to achieve impressive breakthroughs across disciplines but relies on increasingly large neural network models that are trained on massive data sets. Their development inflicts costs that are only affordable by a few labs and prevent global participation in the creation of related technologies. In this talk, we will ask the question if it really has to be like this and discuss some of the major challenges that limit the success of deep learning on smaller scales. We will give three examples of complimentary approaches that could help us address the underlying issues: (i) early neural network sparsification, (ii) the integration of useful inductive bias in the design of problem specific neural network architectures (with biomedical applications), and (iii) the improvement of training from scratch in the context of graph neural networks."
50
+
32
51
- title: "Pruning Deep Neural Networks for Lottery Tickets"
33
52
date: 2023-02-08
34
53
speaker: "Rebekka Burkholz"
@@ -39,7 +58,7 @@ talks:
39
58
authors: "Rebekka Burkholz, Nilanjana Laha, Rajarshi Mukherjee, and Alkis Gotovos"
40
59
conference: ICLR 2022
41
60
link: https://openreview.net/pdf?id=SYB4WrJql1n
42
-
- title: "Plant ’n’ Seek: Can You Find the Winning Ticket?"
61
+
- title: "Plant 'n' Seek: Can You Find the Winning Ticket?"
description: "My main goal is to develop efficient deep learning algorithms that are robust to noise, require small sample sizes, and are generally applicable in the sciences. My work is founded in theory with implications for real world applications and is often characterized by a complex network science perspective. My favourite applications and sources of inspiration are currently the biomedical domain, pharmacy, and physics. My group is supported by the ERC starting grant [SPARSE-ML](https://cispa.de/en/erc-burkholz)."
9
+
description: "I have led the Relational Machine Learning Group at the Helmholtz Center CISPA since 2021. Our research on sparse deep learning is funded by an [ERC starting grant](https://cispa.de/en/research/grants/sparse-ml) since December 2023 and by Apple Research since August 2025. From 2019-2021, I was a PostDoc at the Biostatistics Department of the Harvard T.H. Chan School of Public Health working with John Quackenbush, from 2017-2018 at the Institute for Machine Learning at ETH Zurich with Joachim Buhmann, and from 2016-2017 at the Chair of Systems Design at ETH Zurich with Frank Schweitzer. My PhD research at the ETH Risk Center was supervised by Frank Schweitzer and co-supervised by Hans J. Herrmann from 2013-2016. My thesis on systemic risk won the Zurich Dissertation Prize and our work on international maize trade received the CSF Best Contribution Award. I studied Mathematics and Physics at TU Darmstadt."
description: "My research addresses generalization challenges in graph learning, focusing on the dual role of input graphs as both data and computation structures, and the effects of modifying them under different criteria."
52
+
description: "My research addresses generalization challenges in graph learning, focusing on the dual role of input graphs as both data and computation structures, and the effects of modifying them under different criteria. I hold two degrees in Mathematics and Computer Science from Universidad Complutense de Madrid, and I received the prestigious postgraduate fellowship from la Caixa Foundation in 2022. This allowed me to pursue a master's degree at Strathclyde in the UK, where I was awarded the Best Overall Performance Prize."
We are an ML research group led by [Dr. Rebekka Burkholz](https://sites.google.com/view/rebekkaburkholz). We invite you to explore our research interests and our latest [publications](publications) in top-tier conferences (NeurIPS, ICML, ICLR), and to watch some of our [talks](media/#talks).
15
+
We are an ML research group led by [Dr. Rebekka Burkholz](https://sites.google.com/view/rebekkaburkholz). We invite you to explore our research interests and our latest [publications](publications) in top-tier conferences (NeurIPS, ICML, ICLR), and to watch [videos](outreach/#videos) of some of our talks.
16
16
17
17
We are part of the [CISPA Helmholtz Center for Information Security](https://cispa.de), at the [Saarland University](https://www.uni-saarland.de) campus in Saarbrücken, Germany.
0 commit comments