@inproceedings{chai-etal-2024-exploring,
    title = "Exploring Scientific Hypothesis Generation with Mamba",
    author = "Chai, Miaosen and
      Herron, Emily and
      Cervantes, Erick and
      Ghosal, Tirthankar",
    editor = "Peled-Cohen, Lotem and
      Calderon, Nitay and
      Lissak, Shir and
      Reichart, Roi",
    booktitle = "Proceedings of the 1st Workshop on NLP for Science (NLP4Science)",
    month = nov,
    year = "2024",
    address = "Miami, FL, USA",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthology.org/2024.nlp4science-1.17",
    pages = "197--207",
abstract = "Generating scientifically grounded hypotheses is a challenging frontier task for generative AI models in science. The difficulty arises from the inherent subjectivity of the task and the extensive knowledge of prior work required to assess the validity of a generated hypothesis. Large Language Models (LLMs), trained on vast datasets from diverse sources, have shown a strong ability to utilize the knowledge embedded in their training data. Recent research has explored using transformer-based models for scientific hypothesis generation, leveraging their advanced capabilities. However, these models often require a significant number of parameters to manage Long sequences, which can be a limitation. State Space Models, such as Mamba, offer an alternative by effectively handling very Long sequences with fewer parameters than transformers. In this work, we investigate the use of Mamba for scientific hypothesis generation. Our preliminary findings indicate that Mamba achieves similar performance w.r.t. transformer-based models of similar sizes for a higher-order complex task like hypothesis generation. We have made our code available here: https://github.com/fglx-c/Exploring-Scientific-Hypothesis-Generation-with-Mamba",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="chai-etal-2024-exploring">
<titleInfo>
<title>Exploring Scientific Hypothesis Generation with Mamba</title>
</titleInfo>
<name type="personal">
<namePart type="given">Miaosen</namePart>
<namePart type="family">Chai</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Emily</namePart>
<namePart type="family">Herron</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Erick</namePart>
<namePart type="family">Cervantes</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Tirthankar</namePart>
<namePart type="family">Ghosal</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2024-11</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 1st Workshop on NLP for Science (NLP4Science)</title>
</titleInfo>
<name type="personal">
<namePart type="given">Lotem</namePart>
<namePart type="family">Peled-Cohen</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Nitay</namePart>
<namePart type="family">Calderon</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Shir</namePart>
<namePart type="family">Lissak</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Roi</namePart>
<namePart type="family">Reichart</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Miami, FL, USA</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
  <abstract>Generating scientifically grounded hypotheses is a challenging frontier task for generative AI models in science. The difficulty arises from the inherent subjectivity of the task and the extensive knowledge of prior work required to assess the validity of a generated hypothesis. Large Language Models (LLMs), trained on vast datasets from diverse sources, have shown a strong ability to utilize the knowledge embedded in their training data. Recent research has explored using transformer-based models for scientific hypothesis generation, leveraging their advanced capabilities. However, these models often require a significant number of parameters to manage long sequences, which can be a limitation. State Space Models, such as Mamba, offer an alternative by effectively handling very long sequences with fewer parameters than transformers. In this work, we investigate the use of Mamba for scientific hypothesis generation. Our preliminary findings indicate that Mamba achieves performance comparable to transformer-based models of similar size on a higher-order complex task like hypothesis generation. We have made our code available here: https://github.com/fglx-c/Exploring-Scientific-Hypothesis-Generation-with-Mamba</abstract>
<identifier type="citekey">chai-etal-2024-exploring</identifier>
<location>
<url>https://aclanthology.org/2024.nlp4science-1.17</url>
</location>
<part>
<date>2024-11</date>
<extent unit="page">
<start>197</start>
<end>207</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Exploring Scientific Hypothesis Generation with Mamba
%A Chai, Miaosen
%A Herron, Emily
%A Cervantes, Erick
%A Ghosal, Tirthankar
%Y Peled-Cohen, Lotem
%Y Calderon, Nitay
%Y Lissak, Shir
%Y Reichart, Roi
%S Proceedings of the 1st Workshop on NLP for Science (NLP4Science)
%D 2024
%8 November
%I Association for Computational Linguistics
%C Miami, FL, USA
%F chai-etal-2024-exploring
%X Generating scientifically grounded hypotheses is a challenging frontier task for generative AI models in science. The difficulty arises from the inherent subjectivity of the task and the extensive knowledge of prior work required to assess the validity of a generated hypothesis. Large Language Models (LLMs), trained on vast datasets from diverse sources, have shown a strong ability to utilize the knowledge embedded in their training data. Recent research has explored using transformer-based models for scientific hypothesis generation, leveraging their advanced capabilities. However, these models often require a significant number of parameters to manage long sequences, which can be a limitation. State Space Models, such as Mamba, offer an alternative by effectively handling very long sequences with fewer parameters than transformers. In this work, we investigate the use of Mamba for scientific hypothesis generation. Our preliminary findings indicate that Mamba achieves performance comparable to transformer-based models of similar size on a higher-order complex task like hypothesis generation. We have made our code available here: https://github.com/fglx-c/Exploring-Scientific-Hypothesis-Generation-with-Mamba
%U https://aclanthology.org/2024.nlp4science-1.17
%P 197-207
Markdown (Informal)
[Exploring Scientific Hypothesis Generation with Mamba](https://aclanthology.org/2024.nlp4science-1.17) (Chai et al., NLP4Science 2024)
ACL
Miaosen Chai, Emily Herron, Erick Cervantes, and Tirthankar Ghosal. 2024. Exploring Scientific Hypothesis Generation with Mamba. In Proceedings of the 1st Workshop on NLP for Science (NLP4Science), pages 197–207, Miami, FL, USA. Association for Computational Linguistics.