jpcorb20 committed
Commit 0c60932 · verified · 1 Parent(s): 12000dc

Update README.md

Files changed (1): README.md (+26, -6)
README.md CHANGED
@@ -206,14 +206,34 @@ This project may contain trademarks or logos for projects, products, or services

 ## Citation

- @article{corbeil2025modular,
-   title={A Modular Approach for Clinical SLMs Driven by Synthetic Data with Pre-Instruction Tuning, Model Merging, and Clinical-Tasks Alignment},
-   author={Corbeil, Jean-Philippe and Dada, Amin and Attendu, Jean-Michel and Abacha, Asma Ben and Sordoni, Alessandro and Caccia, Lucas and Beaulieu, Fran{\c{c}}ois and Lin, Thomas and Kleesiek, Jens and Vozila, Paul},
-   journal={arXiv preprint arXiv:2505.10717},
-   year={2025}
+ @inproceedings{corbeil-etal-2025-modular,
+   title = "A Modular Approach for Clinical {SLM}s Driven by Synthetic Data with Pre-Instruction Tuning, Model Merging, and Clinical-Tasks Alignment",
+   author = "Corbeil, Jean-Philippe and
+     Dada, Amin and
+     Attendu, Jean-Michel and
+     Ben Abacha, Asma and
+     Sordoni, Alessandro and
+     Caccia, Lucas and
+     Beaulieu, Francois and
+     Lin, Thomas and
+     Kleesiek, Jens and
+     Vozila, Paul",
+   editor = "Che, Wanxiang and
+     Nabende, Joyce and
+     Shutova, Ekaterina and
+     Pilehvar, Mohammad Taher",
+   booktitle = "Proceedings of the 63rd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)",
+   month = jul,
+   year = "2025",
+   address = "Vienna, Austria",
+   publisher = "Association for Computational Linguistics",
+   url = "https://aclanthology.org/2025.acl-long.950/",
+   doi = "10.18653/v1/2025.acl-long.950",
+   pages = "19352--19374",
+   ISBN = "979-8-89176-251-0",
+   abstract = "High computation costs and latency of large language models such as GPT-4 have limited their deployment in clinical settings. Small language models (SLMs) offer a cost-effective alternative, but their limited capacity requires biomedical domain adaptation, which remains challenging. An additional bottleneck is the unavailability and high sensitivity of clinical data. To address these challenges, we propose a novel framework for adapting SLMs into high-performing clinical models. We introduce the MediPhi collection of 3.8B-parameter SLMs developed with our novel framework: pre-instruction tuning of experts on relevant medical and clinical corpora (PMC, Medical Guideline, MedWiki, etc.), model merging, and clinical-tasks alignment. To cover most clinical tasks, we extended the CLUE benchmark to CLUE+, doubling its size. Our expert models deliver relative improvements on this benchmark over the base model without any task-specific fine-tuning: 64.3{\%} on medical entities, 49.5{\%} on radiology reports, and 44{\%} on ICD-10 coding (outperforming GPT-4-0125 by 14{\%}). We unify the expert models into MediPhi via model merging, preserving gains across benchmarks. Furthermore, we built the MediFlow collection, a synthetic dataset of 2.5 million high-quality instructions on 14 medical NLP tasks, 98 fine-grained document types, and JSON format support. Alignment of MediPhi using supervised fine-tuning and direct preference optimization achieves further gains of 18.9{\%} on average."
 }

-
 ## Model Card Authors

 Jean-Philippe Corbeil