2022
Conferences

Igor Nascimento; Rinaldo Lima; Adrian Chifu; Bernard Espinasse; Sébastien Fournier
DeepREF: A Framework for Optimized Deep Learning-based Relation Classification Conference
Proceedings of the 13th Conference on Language Resources and Evaluation (LREC 2022), European Language Resources Association (ELRA), Marseille, France, 2022.
Abstract | Links | BibTeX | Tags: DDI, DeepREF, Embeddings, Framework, NLP, Optuna, Relation Classification, SemEval
@conference{ChifuLREC2022,
  title     = {{DeepREF}: A Framework for Optimized Deep Learning-based Relation Classification},
  author    = {Nascimento, Igor and Lima, Rinaldo and Chifu, Adrian and Espinasse, Bernard and Fournier, Sébastien},
  url       = {http://www.lrec-conf.org/proceedings/lrec2022/pdf/2022.lrec-1.480.pdf},
  year      = {2022},
  date      = {2022-06-20},
  urldate   = {2022-06-20},
  booktitle = {Proceedings of the 13th Conference on Language Resources and Evaluation (LREC 2022)},
  pages     = {4513--4522},
  publisher = {European Language Resources Association (ELRA)},
  address   = {Marseille, France},
  abstract  = {Relation Extraction (RE) is an important basic Natural Language Processing (NLP) task for many applications, including search engines and question-answering systems. There are many studies in this subarea of NLP that continue to be explored, such as the ones concerned by SemEval shared tasks. For many years, several RE systems based on statistical models have been proposed, as well as the frameworks to develop them. We focus on frameworks allowing to develop such RE systems using deep learning models. Such frameworks make it possible to reproduce experiments using many deep learning models and preprocessing techniques. Currently, there are very few frameworks of this type. In this paper, we propose an open and optimizable framework called DeepREF, inspired by two other existing frameworks: OpenNRE and REflex. DeepREF allows the rapid development of deep learning models for Relation Classification (RC). In addition, it enables hyperparameter optimization, and the application of many preprocessing techniques on the input textual data. DeepREF provides means to boost the process of running deep learning models for RC tasks on different datasets and models. DeepREF is evaluated on three reference corpora and has demonstrated competitive results compared to other state-of-the-art RC systems.},
  keywords  = {DDI, DeepREF, Embeddings, Framework, NLP, Optuna, Relation Classification, SemEval},
  pubstate  = {published},
  tppubtype = {conference}
}
2019
Conferences

Bernard Espinasse; Sébastien Fournier; Adrian Chifu; Gaël Guibon; René Azcurra; Valentin Mace
On the Use of Dependencies in Relation Classification of Text with Deep Learning Conference
Proceedings of the 20th International Conference on Computational Linguistics and Intelligent Text Processing (CICLing 2019), 2019.
Abstract | Links | BibTeX | Tags: Compositional Word Embedding, Deep Learning, Dependencies, Relation Classification, Word Embedding
@conference{Espinasse2019,
  title     = {On the Use of Dependencies in Relation Classification of Text with Deep Learning},
  author    = {Espinasse, Bernard and Fournier, Sébastien and Chifu, Adrian and Guibon, Gaël and Azcurra, René and Mace, Valentin},
  url       = {https://hal.archives-ouvertes.fr/hal-02103919/document},
  year      = {2019},
  date      = {2019-04-07},
  urldate   = {2019-04-07},
  booktitle = {20th International Conference on Computational Linguistics and Intelligent Text Processing (CICLing2019)},
  series    = {CICLing2019},
  abstract  = {Deep Learning is more and more used in NLP tasks, such as in relation classification of texts. This paper assesses the impact of syntactic dependencies in this task at two levels. The first level concerns the generic Word Embedding (WE) as input of the classification model, the second level concerns the corpus whose relations have to be classified. In this paper, two classification models are studied, the first one is based on a CNN using a generic WE and does not take into account the dependencies of the corpus to be treated, and the second one is based on a compositional WE combining a generic WE with syntactical annotations of this corpus to classify. The impact of dependencies in relation classification is estimated using two different WE. The first one is essentially lexical and trained on the Wikipedia corpus in English, while the second one is also syntactical, trained on the same previously annotated corpus with syntactical dependencies. The two classification models are evaluated on the SemEval 2010 reference corpus using these two generic WE. The experiments show the importance of taking dependencies into account at different levels in the relation classification.},
  keywords  = {Compositional Word Embedding, Deep Learning, Dependencies, Relation Classification, Word Embedding},
  pubstate  = {published},
  tppubtype = {conference}
}