florin-hf committed on
Commit
194571a
1 Parent(s): a507743

Update README.md

Files changed (1)
  1. README.md +9 -7
README.md CHANGED
@@ -92,12 +92,14 @@ Ex.
  abstract = "Recent work on open domain question answering (QA) assumes strong supervision of the supporting evidence and/or assumes a blackbox information retrieval (IR) system to retrieve evidence candidates. We argue that both are suboptimal, since gold evidence is not always available, and QA is fundamentally different from IR. We show for the first time that it is possible to jointly learn the retriever and reader from question-answer string pairs and without any IR system. In this setting, evidence retrieval from all of Wikipedia is treated as a latent variable. Since this is impractical to learn from scratch, we pre-train the retriever with an Inverse Cloze Task. We evaluate on open versions of five QA datasets. On datasets where the questioner already knows the answer, a traditional IR system such as BM25 is sufficient. On datasets where a user is genuinely seeking an answer, we show that learned retrieval is crucial, outperforming BM25 by up to 19 points in exact match.",
  }

- @misc{cuconasu2024power,
-   title={The Power of Noise: Redefining Retrieval for RAG Systems},
-   author={Florin Cuconasu and Giovanni Trappolini and Federico Siciliano and Simone Filice and Cesare Campagnano and Yoelle Maarek and Nicola Tonellotto and Fabrizio Silvestri},
-   year={2024},
-   eprint={2401.14887},
-   archivePrefix={arXiv},
-   primaryClass={cs.IR}
+ @inproceedings{Cuconasu_2024, series={SIGIR 2024},
+   title={The Power of Noise: Redefining Retrieval for RAG Systems},
+   url={http://dx.doi.org/10.1145/3626772.3657834},
+   DOI={10.1145/3626772.3657834},
+   booktitle={Proceedings of the 47th International ACM SIGIR Conference on Research and Development in Information Retrieval},
+   publisher={ACM},
+   author={Cuconasu, Florin and Trappolini, Giovanni and Siciliano, Federico and Filice, Simone and Campagnano, Cesare and Maarek, Yoelle and Tonellotto, Nicola and Silvestri, Fabrizio},
+   year={2024},
+   month=jul, collection={SIGIR 2024}
  }
  ```