@inproceedings{wood-doughty-etal-2018-convolutions,
    abstract = {While recurrent neural networks (RNNs) are widely used for text classification, they demonstrate poor performance and slow convergence when trained on long sequences. When text is modeled as characters instead of words, the longer sequences make RNNs a poor choice. Convolutional neural networks (CNNs), although somewhat less ubiquitous than RNNs, have an internal structure more appropriate for long-distance character dependencies. To better understand how CNNs and RNNs differ in handling long sequences, we use them for text classification tasks in several character-level social media datasets. The CNN models vastly outperform the RNN models in our experiments, suggesting that CNNs are superior to RNNs at learning to classify character-level data.},
    address = {Brussels, Belgium},
    author = {Wood-Doughty, Zach and Andrews, Nicholas and Dredze, Mark},
    booktitle = {Proceedings of the 2018 EMNLP Workshop W-NUT: The 4th Workshop on Noisy User-generated Text},
    doi = {10.18653/v1/W18-6127},
    month = {November},
    pages = {208--213},
    publisher = {Association for Computational Linguistics},
    title = {Convolutions Are All You Need (For Classifying Character Sequences)},
    url = {https://www.aclweb.org/anthology/W18-6127},
    year = {2018}
}