@inproceedings{gainski-balazy-2023-step,
title = "Step by Step Loss Goes Very Far: Multi-Step Quantization for Adversarial Text Attacks",
author = "Gai{\'n}ski, Piotr and
Ba{\l}azy, Klaudia",
editor = "Vlachos, Andreas and
Augenstein, Isabelle",
booktitle = "Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics",
month = may,
year = "2023",
address = "Dubrovnik, Croatia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/2023.eacl-main.149/",
doi = "10.18653/v1/2023.eacl-main.149",
pages = "2038--2048",
abstract = "We propose a novel gradient-based attack against transformer-based language models that searches for an adversarial example in a continuous space of tokens probabilities. Our algorithm mitigates the gap between adversarial loss for continuous and discrete text representations by performing multi-step quantization in a quantization-compensation loop. Experiments show that our method significantly outperforms other approaches on various natural language processing (NLP) tasks."
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="gainski-balazy-2023-step">
<titleInfo>
<title>Step by Step Loss Goes Very Far: Multi-Step Quantization for Adversarial Text Attacks</title>
</titleInfo>
<name type="personal">
<namePart type="given">Piotr</namePart>
<namePart type="family">Gaiński</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Klaudia</namePart>
<namePart type="family">Bałazy</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-05</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Andreas</namePart>
<namePart type="family">Vlachos</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Isabelle</namePart>
<namePart type="family">Augenstein</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Dubrovnik, Croatia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
    <abstract>We propose a novel gradient-based attack against transformer-based language models that searches for an adversarial example in a continuous space of token probabilities. Our algorithm mitigates the gap between adversarial loss for continuous and discrete text representations by performing multi-step quantization in a quantization-compensation loop. Experiments show that our method significantly outperforms other approaches on various natural language processing (NLP) tasks.</abstract>
<identifier type="citekey">gainski-balazy-2023-step</identifier>
<identifier type="doi">10.18653/v1/2023.eacl-main.149</identifier>
<location>
<url>https://aclanthology.org/2023.eacl-main.149/</url>
</location>
<part>
<date>2023-05</date>
<extent unit="page">
<start>2038</start>
<end>2048</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Step by Step Loss Goes Very Far: Multi-Step Quantization for Adversarial Text Attacks
%A Gaiński, Piotr
%A Bałazy, Klaudia
%Y Vlachos, Andreas
%Y Augenstein, Isabelle
%S Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics
%D 2023
%8 May
%I Association for Computational Linguistics
%C Dubrovnik, Croatia
%F gainski-balazy-2023-step
%X We propose a novel gradient-based attack against transformer-based language models that searches for an adversarial example in a continuous space of token probabilities. Our algorithm mitigates the gap between adversarial loss for continuous and discrete text representations by performing multi-step quantization in a quantization-compensation loop. Experiments show that our method significantly outperforms other approaches on various natural language processing (NLP) tasks.
%R 10.18653/v1/2023.eacl-main.149
%U https://aclanthology.org/2023.eacl-main.149/
%U https://doi.org/10.18653/v1/2023.eacl-main.149
%P 2038-2048
Markdown (Informal)
[Step by Step Loss Goes Very Far: Multi-Step Quantization for Adversarial Text Attacks](https://aclanthology.org/2023.eacl-main.149/) (Gaiński & Bałazy, EACL 2023)
ACL
Piotr Gaiński and Klaudia Bałazy. 2023. Step by Step Loss Goes Very Far: Multi-Step Quantization for Adversarial Text Attacks. In Proceedings of the 17th Conference of the European Chapter of the Association for Computational Linguistics, pages 2038–2048, Dubrovnik, Croatia. Association for Computational Linguistics.
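
The abstract describes the approach only at a high level: optimize an adversarial example in a continuous space of token probabilities, then close the gap to discrete text by quantizing step by step while the remaining positions compensate. The PyTorch sketch below is a minimal illustration of that general idea on a toy linear "victim" classifier; the model, loss, schedule, and all names here are assumptions for illustration, not the authors' implementation.

```python
# Minimal illustrative sketch (not the paper's implementation): a gradient-based
# attack that optimizes a continuous distribution over the vocabulary for each
# position, then pins positions to discrete tokens one by one while the remaining
# positions keep being optimized to compensate for the discretization loss gap.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
vocab_size, seq_len, emb_dim = 50, 8, 16

# Toy "victim" classifier: an embedding table plus a linear head (assumption;
# the paper attacks transformer-based language models).
embedding = torch.randn(vocab_size, emb_dim)
classifier = torch.nn.Linear(emb_dim, 2)
target_label = torch.tensor([1])  # label the attacker wants the model to predict

# Continuous relaxation: one logit vector per position, softmaxed into token probabilities.
attack_logits = torch.zeros(seq_len, vocab_size, requires_grad=True)
optimizer = torch.optim.Adam([attack_logits], lr=0.1)

def adversarial_loss(token_probs):
    # Expected embedding under the relaxed token distribution, mean-pooled and classified.
    expected_emb = token_probs @ embedding              # (seq_len, emb_dim)
    scores = classifier(expected_emb.mean(0, keepdim=True))
    return F.cross_entropy(scores, target_label)

for step in range(80):
    optimizer.zero_grad()
    probs = F.softmax(attack_logits, dim=-1)
    loss = adversarial_loss(probs)
    loss.backward()
    optimizer.step()

    # Every 10 steps, quantize one more position to its argmax token; subsequent
    # iterations re-optimize the still-free positions, compensating for the loss
    # increase caused by discretizing this one (the quantization-compensation idea).
    if step % 10 == 9:
        position = step // 10
        with torch.no_grad():
            pinned = torch.full((vocab_size,), -1e9)
            pinned[attack_logits[position].argmax()] = 1e9
            attack_logits[position] = pinned

# Fully discrete adversarial candidate: the argmax token id at every position.
print(attack_logits.argmax(dim=-1).tolist())
```

In the actual attack the victim is a transformer-based language model and the quantization-compensation loop is more elaborate; the sketch only shows why re-optimizing the free positions after each quantization step can reduce the gap between the continuous and discrete adversarial loss.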