2021
|
 | Nicolas, Lionel; Aparaschivei, Lavinia Nicoleta; Lyding, Verena; Rodosthenous, Christos; Sangati, Federico; König, Alexander; Forascu, Corina An Experiment on Implicitly Crowdsourcing Expert Knowledge about Romanian Synonyms from Language Learners Proceedings Article In: Alfter, David; Volodina, Elena; Pilán, Ildikó; Graën, Johannes; Borin, Lars (Ed.): Proceedings of the 10th Workshop on Natural Language Processing for Computer Assisted Language Learning (NLP4CALL 2021), pp. 1-14, Linköping Electronic Conference Proceedings, 2021. @inproceedings{nicolas2021,
title = {An Experiment on Implicitly Crowdsourcing Expert Knowledge about Romanian Synonyms from Language Learners},
author = {Lionel Nicolas and Lavinia Nicoleta Aparaschivei and Verena Lyding and Christos Rodosthenous and Federico Sangati and Alexander König and Corina Forascu},
editor = {David Alfter and Elena Volodina and Ildikó Pilán and Johannes Graën and Lars Borin},
url = {https://ep.liu.se/ecp/177/001/ecp2021177001.pdf},
year = {2021},
date = {2021-05-31},
booktitle = {Proceedings of the 10th Workshop on Natural Language Processing for Computer Assisted Language Learning (NLP4CALL 2021)},
volume = {47},
pages = {1-14},
publisher = {Linköping Electronic Conference Proceedings},
keywords = {crowdsourcing, enetCollect, Language learning},
pubstate = {published},
tppubtype = {inproceedings}
}
|
2020
|
 | Araneta, Marianne Grace; Eryigit, Gülsen; König, Alexander; Lee, Ji-Ung; Luís, Ana; Lyding, Verena; Nicolas, Lionel; Rodosthenous, Christos; Sangati, Federico Substituto - A Synchronous Educational Language Game for Simultaneous Teaching and Crowdsourcing Proceedings Article In: Proceedings of the 9th Workshop on Natural Language Processing for Computer Assisted Language Learning (NLP4CALL 2020), 2020. @inproceedings{marianne2020,
title = {Substituto - A Synchronous Educational Language Game for Simultaneous Teaching and Crowdsourcing},
author = {Marianne Grace Araneta and Gülsen Eryigit and Alexander König and Ji-Ung Lee and Ana Luís and Verena Lyding and Lionel Nicolas and Christos Rodosthenous and Federico Sangati},
url = {https://ep.liu.se/ecp/175/001/ecp20175001.pdf},
year = {2020},
date = {2020-11-20},
booktitle = {Proceedings of the 9th Workshop on Natural Language Processing for Computer Assisted Language Learning (NLP4CALL 2020)},
abstract = {This paper investigates a general framework for synchronous educational language games that simultaneously allows researchers to crowdsource learner answers in a controlled environment. Our prototype Substituto allows teachers and students to interact in real-time while undergoing language learning exercises, ensuring that the learner’s progress is not hurt by the introduction of crowdsourcing elements. We evaluate Substituto with a small-scale user study that focuses on training the use of English verb-particle constructions (VPCs), such as break down or take over, and test their use with second language learners of English of different proficiency levels over five pilot sessions. With the study we aim to ensure that our prototypical implementation behaves as expected and to identify any major design flaws that should be addressed. The preliminary results we achieved in evaluating the educational value, the user experience and the crowdsourcing capacity of Substituto confirm that it has the potential to become a valuable asset for language learning, a pleasant learning instrument and a crowdsourcing tool for collecting linguistic knowledge.},
keywords = {crowdsourcing, Language learning},
pubstate = {published},
tppubtype = {inproceedings}
}
|
 | Holdt, Špela Arhar; Zviel-Girshin, Rina; Gajek, Elżbieta; Durán-Muñoz, Isabel; Bago, Petra; Fort, Karën; Hatipoğlu, Ciler; Kasperavičienė, Ramunė; Koeva, Svetla; Konjik, Ivana Lazić; Miloshevska, Lina; Ordulj, Antonia; Rodosthenous, Christos; Volodina, Elena; Weber, Tassja; Zanasi, Lorenzo Language Teachers and Crowdsourcing: Insights from a Cross-European Survey. Journal Article In: Rasprave, vol. 46, no. 1, 2020. @article{rodosthenous_2020.4,
title = {Language Teachers and Crowdsourcing: Insights from a Cross-European Survey.},
author = {Špela Arhar Holdt and Rina Zviel-Girshin and Elżbieta Gajek and Isabel Durán-Muñoz and Petra Bago and Karën Fort and Ciler Hatipoğlu and Ramunė Kasperavičienė and Svetla Koeva and Ivana Lazić Konjik and Lina Miloshevska and Antonia Ordulj and Christos Rodosthenous and Elena Volodina and Tassja Weber and Lorenzo Zanasi},
url = {https://hrcak.srce.hr/index.php?show=toc&id_broj=19344},
year = {2020},
date = {2020-09-02},
journal = {Rasprave},
volume = {46},
number = {1},
publisher = {Institut za hrvatski jezik i jezikoslovlje},
address = {Zagreb},
keywords = {crowdsourcing, distance education, Language learning},
pubstate = {published},
tppubtype = {article}
}
|
 | Nicolas, Lionel; Lyding, Verena; Borg, Claudia; Forascu, Corina; Fort, Karën; Zdravkova, Katerina; Kosem, Iztok; Čibej, Jaka; Holdt, Špela Arhar; Millour, Alice; König, Alexander; Rodosthenous, Christos; Sangati, Federico; ul Hassan, Umair; Katinskaia, Anisia; Barreiro, Anabela; Aparaschivei, Lavinia; HaCohen-Kerner, Yaakov Creating Expert Knowledge by Relying on Language Learners: a Generic Approach for Mass-Producing Language Resources by Combining Implicit Crowdsourcing and Language Learning Proceedings Article In: Proceedings of The 12th Language Resources and Evaluation Conference, pp. 268–278, European Language Resources Association, Marseille, France, 2020, ISBN: 979-10-95546-34-4. @inproceedings{nicolas-etal-2020-creating,
title = {Creating Expert Knowledge by Relying on Language Learners: a Generic Approach for Mass-Producing Language Resources by Combining Implicit Crowdsourcing and Language Learning},
author = {Lionel Nicolas and Verena Lyding and Claudia Borg and Corina Forascu and Karën Fort and Katerina Zdravkova and Iztok Kosem and Jaka Čibej and Špela Arhar Holdt and Alice Millour and Alexander König and Christos Rodosthenous and Federico Sangati and Umair ul Hassan and Anisia Katinskaia and Anabela Barreiro and Lavinia Aparaschivei and Yaakov HaCohen-Kerner},
url = {https://www.christosrodosthenous.info/wp-content/uploads/2020/05/2020.lrec-1.34-1.pdf
https://www.aclweb.org/anthology/2020.lrec-1.34},
isbn = {979-10-95546-34-4},
year = {2020},
date = {2020-01-01},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
pages = {268--278},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {We introduce in this paper a generic approach to combine implicit crowdsourcing and language learning in order to mass-produce language resources (LRs) for any language for which a crowd of language learners can be involved. We present the approach by explaining its core paradigm that consists in pairing specific types of LRs with specific exercises, by detailing both its strengths and challenges, and by discussing how much these challenges have been addressed at present. Accordingly, we also report on on-going proof-of-concept efforts aiming at developing the first prototypical implementation of the approach in order to correct and extend an LR called ConceptNet based on the input crowdsourced from language learners. We then present an international network called the European Network for Combining Language Learning with Crowdsourcing Techniques (enetCollect) that provides the context to accelerate the implementation of this generic approach. Finally, we exemplify how it can be used in several language learning scenarios to produce a multitude of NLP resources and how it can therefore alleviate the long-standing NLP issue of the lack of LRs.},
keywords = {crowdsourcing, enetCollect, Language learning},
pubstate = {published},
tppubtype = {inproceedings}
}
|
 | Rodosthenous, Christos; Lyding, Verena; Sangati, Federico; König, Alexander; ul Hassan, Umair; Nicolas, Lionel; Horbacauskiene, Jolita; Katinskaia, Anisia; Aparaschivei, Lavinia Using Crowdsourced Exercises for Vocabulary Training to Expand ConceptNet Proceedings Article In: Proceedings of The 12th Language Resources and Evaluation Conference, pp. 307–316, European Language Resources Association, Marseille, France, 2020, ISBN: 979-10-95546-34-4. @inproceedings{rodosthenous-etal-2020-using,
title = {Using Crowdsourced Exercises for Vocabulary Training to Expand ConceptNet},
author = {Christos Rodosthenous and Verena Lyding and Federico Sangati and Alexander König and Umair ul Hassan and Lionel Nicolas and Jolita Horbacauskiene and Anisia Katinskaia and Lavinia Aparaschivei},
url = {https://www.christosrodosthenous.info/wp-content/uploads/2020/05/2020.lrec-1.38.pdf
https://www.aclweb.org/anthology/2020.lrec-1.38},
isbn = {979-10-95546-34-4},
year = {2020},
date = {2020-01-01},
booktitle = {Proceedings of The 12th Language Resources and Evaluation Conference},
pages = {307--316},
publisher = {European Language Resources Association},
address = {Marseille, France},
abstract = {In this work, we report on a crowdsourcing experiment conducted using the V-TREL vocabulary trainer, which is accessed via a Telegram chatbot interface to gather knowledge on word relations suitable for expanding ConceptNet. V-TREL is built on top of a generic architecture implementing the implicit crowdsourcing paradigm in order to offer vocabulary training exercises generated from the commonsense knowledge base ConceptNet and -- in the background -- to collect and evaluate the learners' answers to extend ConceptNet with new words. In the experiment, about 90 university students learning English at C1 level, based on the Common European Framework of Reference for Languages (CEFR), trained their vocabulary with V-TREL over a period of 16 calendar days. The experiment allowed us to gather more than 12,000 answers from learners on different question types. In this paper we present in detail the experimental setup and the outcome of the experiment, which indicates the potential of our approach both for crowdsourcing data and for fostering vocabulary skills.},
keywords = {ConceptNet, enetCollect, Language learning},
pubstate = {published},
tppubtype = {inproceedings}
}
|
2019
|
 | Lyding, Verena; Rodosthenous, Christos; Sangati, Federico; ul Hassan, Umair; Nicolas, Lionel; König, Alexander; Horbacauskiene, Jolita; Katinskaia, Anisia v-trel: Vocabulary Trainer for Tracing Word Relations - An Implicit Crowdsourcing Approach Proceedings Article In: Angelova, Galia; Mitkov, Ruslan; Nikolova, Ivelina; Temnikova, Irina (Ed.): Proceedings of the International Conference Recent Advances in Natural Language Processing, RANLP 2019, pp. 675-684, Varna, Bulgaria, 2019. @inproceedings{lyding:2019:RANLP,
title = {v-trel: Vocabulary Trainer for Tracing Word Relations - An Implicit Crowdsourcing Approach},
author = {Verena Lyding and Christos Rodosthenous and Federico Sangati and Umair ul Hassan and Lionel Nicolas and Alexander König and Jolita Horbacauskiene and Anisia Katinskaia},
editor = {Galia Angelova and Ruslan Mitkov and Ivelina Nikolova and Irina Temnikova},
url = {https://www.christosrodosthenous.info/wp-content/uploads/2019/09/RANLP_2019_Paper.pdf},
year = {2019},
date = {2019-09-02},
booktitle = {Proceedings of the International Conference Recent Advances in Natural Language Processing, RANLP 2019},
pages = {675-684},
address = {Varna, Bulgaria},
abstract = {In this paper, we present our work on developing a vocabulary trainer that uses exercises generated from language resources such as ConceptNet and crowdsources the responses of the learners to enrich the language resource. We performed an empirical evaluation of our approach with 60 non-native speakers over two days, which shows that new entries to expand ConceptNet can efficiently be gathered through vocabulary exercises on word relations.
We also report on the feedback gathered from the users and an expert from language teaching, and discuss the potential of the vocabulary trainer application from the user and language learner perspective. The feedback suggests that v-trel has educational potential, while in its current state some shortcomings could be identified.},
keywords = {ConceptNet, crowdsourcing, Language learning, Natural Language Processing},
pubstate = {published},
tppubtype = {inproceedings}
}
|
 | Rodosthenous, Christos; Lyding, Verena; König, Alexander; Horbacauskiene, Jolita; Katinskaia, Anisia; ul Hassan, Umair; Isaak, Nicos; Sangati, Federico; Nicolas, Lionel Designing a Prototype Architecture for Crowdsourcing Language Resources Proceedings Article In: Declerck, Thierry; McCrae, John P. (Ed.): Proceedings of the Poster Session of the 2nd Conference on Language, Data and Knowledge (LDK 2019), pp. 17–23, CEUR, 2019. @inproceedings{enetcollect1,
title = {Designing a Prototype Architecture for Crowdsourcing Language Resources},
author = {Christos Rodosthenous and Verena Lyding and Alexander König and Jolita Horbacauskiene and Anisia Katinskaia and Umair ul Hassan and Nicos Isaak and Federico Sangati and Lionel Nicolas},
editor = {Thierry Declerck and John P. McCrae},
url = {http://ceur-ws.org/Vol-2402/paper4.pdf},
year = {2019},
date = {2019-07-10},
booktitle = {Proceedings of the Poster Session of the 2nd Conference on Language, Data and Knowledge (LDK 2019)},
volume = {Vol-2402},
pages = {17--23},
publisher = {CEUR},
abstract = {We present an architecture for crowdsourcing language resources from language learners and a prototype implementation of it as a vocabulary trainer. The vocabulary trainer relies on lexical resources from the ConceptNet semantic network to generate exercises while using the learners' answers to improve the resources used for the exercise generation.},
keywords = {Commonsense Knowledge, ConceptNet, crowdsourcing, enetCollect, Knowledge Bases, Language learning, Language Resources, Lexicon},
pubstate = {published},
tppubtype = {inproceedings}
}
|
2018
|
 | Zviel-Girshin, Rina; Rodosthenous, Christos; Rosenberg, Nathan SUSTAIN-ALL Language Learning Conference Proceedings of the 12th Israel Association for Information Systems (ILAIS) Conference, 2018. @conference{Zviel-Girshin2018,
title = {SUSTAIN-ALL Language Learning},
author = {Rina Zviel-Girshin and Christos Rodosthenous and Nathan Rosenberg},
url = {https://www.christosrodosthenous.info/wp-content/uploads/2019/01/ILAIS_2018_paper_18.pdf},
year = {2018},
date = {2018-01-24},
booktitle = {Proceedings of the 12th Israel Association for Information Systems (ILAIS) Conference},
keywords = {Computer Assisted Language Learning, enetCollect, Human computer interaction, Language learning},
pubstate = {published},
tppubtype = {conference}
}
|