From 3f4b92103f11139908294735c4995b6a187c66e0 Mon Sep 17 00:00:00 2001 From: Leonhard Hennig Date: Mon, 27 May 2024 11:44:04 +0200 Subject: [PATCH] added some papers --- .../cald-eacl24-nishiyama-assessing/cite.bib | 24 +++++++ .../cald-eacl24-nishiyama-assessing/index.md | 69 +++++++++++++++++++ .../hcinlp24-wang-llmcheckup/cite.bib | 11 +++ .../hcinlp24-wang-llmcheckup/index.md | 69 +++++++++++++++++++ 4 files changed, 173 insertions(+) create mode 100644 content/publication/cald-eacl24-nishiyama-assessing/cite.bib create mode 100644 content/publication/cald-eacl24-nishiyama-assessing/index.md create mode 100644 content/publication/hcinlp24-wang-llmcheckup/cite.bib create mode 100644 content/publication/hcinlp24-wang-llmcheckup/index.md diff --git a/content/publication/cald-eacl24-nishiyama-assessing/cite.bib b/content/publication/cald-eacl24-nishiyama-assessing/cite.bib new file mode 100644 index 0000000..f842b9b --- /dev/null +++ b/content/publication/cald-eacl24-nishiyama-assessing/cite.bib @@ -0,0 +1,24 @@ +@inproceedings{nishiyama-etal-2024-assessing, + title = "Assessing Authenticity and Anonymity of Synthetic User-generated Content in the Medical Domain", + author = "Nishiyama, Tomohiro and + Raithel, Lisa and + Roller, Roland and + Zweigenbaum, Pierre and + Aramaki, Eiji", + editor = {Volodina, Elena and + Alfter, David and + Dobnik, Simon and + Lindstr{\"o}m Tiedemann, Therese and + Mu{\~n}oz S{\'a}nchez, Ricardo and + Szawerna, Maria Irena and + Vu, Xuan-Son}, + booktitle = "Proceedings of the Workshop on Computational Approaches to Language Data Pseudonymization (CALD-pseudo 2024)", + month = mar, + year = "2024", + address = "St. Julian{'}s, Malta", + publisher = "Association for Computational Linguistics", + url = "https://aclanthology.org/2024.caldpseudo-1.2", + pages = "8--17", + abstract = "Since medical text cannot be shared easily due to privacy concerns, synthetic data bears much potential for natural language processing applications. In the context of social media and user-generated messages about drug intake and adverse drug effects, this work presents different methods to examine the authenticity of synthetic text. We conclude that the generated tweets are untraceable and show enough authenticity from the medical point of view to be used as a replacement for a real Twitter corpus. However, original data might still be the preferred choice as they contain much more diversity.", +} + diff --git a/content/publication/cald-eacl24-nishiyama-assessing/index.md b/content/publication/cald-eacl24-nishiyama-assessing/index.md new file mode 100644 index 0000000..280c0d5 --- /dev/null +++ b/content/publication/cald-eacl24-nishiyama-assessing/index.md @@ -0,0 +1,69 @@ +--- +# Documentation: https://wowchemy.com/docs/managing-content/ + +title: "Assessing Authenticity and Anonymity of Synthetic User-generated Content in the Medical Domain" +authors: [Tomohiro Nishiyama, Lisa Raithel, Roland Roller, Pierre Zweigenbaum, Eiji Aramaki] +date: 2024-03-20T10:33:03+02:00 +doi: "" + +# Schedule page publish date (NOT publication's date). +publishDate: 2024-03-23T10:33:03+02:00 + +# Publication type. +# Legend: 0 = Uncategorized; 1 = Conference paper; 2 = Journal article; +# 3 = Preprint / Working Paper; 4 = Report; 5 = Book; 6 = Book section; +# 7 = Thesis; 8 = Patent +publication_types: ["1"] + +# Publication name and optional abbreviated publication name. 
+publication: "Proceedings of the Workshop on Computational Approaches to Language Data Pseudonymization" +publication_short: "CALD-pseudo 2024" + +abstract: "Since medical text cannot be shared easily due to privacy concerns, synthetic data bears much potential for natural language processing applications. In the context of social media and user-generated messages about drug intake and adverse drug effects, this work presents different methods to examine the authenticity of synthetic text. We conclude that the generated tweets are untraceable and show enough authenticity from the medical point of view to be used as a replacement for a real Twitter corpus. However, original data might still be the preferred choice as they contain much more diversity." + +# Summary. An optional shortened abstract. +summary: "" + +tags: [] +categories: [] +featured: false + +# Custom links (optional). +# Uncomment and edit lines below to show custom links. +# links: +# - name: Follow +# url: https://twitter.com +# icon_pack: fab +# icon: twitter + +url_pdf: "https://aclanthology.org/2024.caldpseudo-1.2.pdf" +url_code: "" +url_dataset: +url_poster: +url_project: +url_slides: +url_source: +url_video: + +# Featured image +# To use, add an image named `featured.jpg/png` to your page's folder. +# Focal points: Smart, Center, TopLeft, Top, TopRight, Left, Right, BottomLeft, Bottom, BottomRight. +image: + caption: "" + focal_point: "" + preview_only: false + +# Associated Projects (optional). +# Associate this publication with one or more of your projects. +# Simply enter your project's folder or file name without extension. +# E.g. `internal-project` references `content/project/internal-project/index.md`. +# Otherwise, set `projects: []`. +projects: [KEEPHA] + +# Slides (optional). +# Associate this publication with Markdown slides. +# Simply enter your slide deck's filename without extension. +# E.g. `slides: "example"` references `content/slides/example/index.md`. +# Otherwise, set `slides: ""`. +slides: "" +--- diff --git a/content/publication/hcinlp24-wang-llmcheckup/cite.bib b/content/publication/hcinlp24-wang-llmcheckup/cite.bib new file mode 100644 index 0000000..2b04376 --- /dev/null +++ b/content/publication/hcinlp24-wang-llmcheckup/cite.bib @@ -0,0 +1,11 @@ +@inproceedings{pub14839, + author = {Wang, Qianli and Anikina, Tatiana and Feldhus, Nils and van Genabith, Josef and Hennig, Leonhard and Möller, Sebastian +}, + editor = {Blodgett, Su Lin and Curry, Amanda Cercas and Dev, Sunipa and Madaio, Michael and Nenkova, Ani and Yang, Diyi and Xiao, Ziang +}, + title = {LLMCheckup: Conversational Examination of Large Language Models via Interpretability Tools and Self-Explanations}, + booktitle = {Proceedings of the Third Workshop on Bridging Human-Computer Interaction and Natural Language Processing (HCI+NLP). 
Co-located with the Annual Conference of the North American Chapter of the Association for Computational Linguistics (NAACL 2024), Mexico City, Mexico}, + year = {2024}, + publisher = {Association for Computational Linguistics}, + note = {https://github.com/DFKI-NLP/LLMCheckup} +} diff --git a/content/publication/hcinlp24-wang-llmcheckup/index.md b/content/publication/hcinlp24-wang-llmcheckup/index.md new file mode 100644 index 0000000..4862a53 --- /dev/null +++ b/content/publication/hcinlp24-wang-llmcheckup/index.md @@ -0,0 +1,69 @@ +--- +# Documentation: https://wowchemy.com/docs/managing-content/ + +title: "LLMCheckup: Conversational Examination of Large Language Models via Interpretability Tools and Self-Explanations" +authors: [Qianli Wang, Tatiana Anikina, Nils Feldhus, Josef van Genabith, Leonhard Hennig, Sebastian Möller] +date: 2024-05-20T10:33:03+02:00 +doi: "" + +# Schedule page publish date (NOT publication's date). +publishDate: 2024-05-20T10:33:03+02:00 + +# Publication type. +# Legend: 0 = Uncategorized; 1 = Conference paper; 2 = Journal article; +# 3 = Preprint / Working Paper; 4 = Report; 5 = Book; 6 = Book section; +# 7 = Thesis; 8 = Patent +publication_types: ["1"] + +# Publication name and optional abbreviated publication name. +publication: "Proceedings of the Third Workshop on Bridging Human-Computer Interaction and Natural Language Processing" +publication_short: "HCI+NLP-2024" + +abstract: "Interpretability tools that offer explanations in the form of a dialogue have demonstrated their efficacy in enhancing users' understanding (Slack et al., 2023; Shen et al., 2023), as one-off explanations may fall short in providing sufficient information to the user. Current solutions for dialogue-based explanations, however, often require external tools and modules and are not easily transferable to tasks they were not designed for. With LLMCheckup, we present an easily accessible tool that allows users to chat with any state-of-the-art large language model (LLM) about its behavior. We enable LLMs to generate explanations and perform user intent recognition without fine-tuning, by connecting them with a broad spectrum of Explainable AI (XAI) methods, including white-box explainability tools such as feature attributions, and self-explanations (e.g., for rationale generation). LLM-based (self-)explanations are presented as an interactive dialogue that supports follow-up questions and generates suggestions. LLMCheckup provides tutorials for operations available in the system, catering to individuals with varying levels of expertise in XAI and supporting multiple input modalities. We introduce a new parsing strategy that substantially enhances the user intent recognition accuracy of the LLM. Finally, we showcase LLMCheckup for the tasks of fact checking and commonsense question answering." + +# Summary. An optional shortened abstract. +summary: "" + +tags: [] +categories: [] +featured: false + +# Custom links (optional). +# Uncomment and edit lines below to show custom links. +# links: +# - name: Follow +# url: https://twitter.com +# icon_pack: fab +# icon: twitter + +url_pdf: "https://arxiv.org/pdf/2401.12576" +url_code: "https://github.com/DFKI-NLP/LLMCheckup" +url_dataset: +url_poster: +url_project: +url_slides: +url_source: +url_video: + +# Featured image +# To use, add an image named `featured.jpg/png` to your page's folder.
+# Focal points: Smart, Center, TopLeft, Top, TopRight, Left, Right, BottomLeft, Bottom, BottomRight. +image: + caption: "" + focal_point: "" + preview_only: false + +# Associated Projects (optional). +# Associate this publication with one or more of your projects. +# Simply enter your project's folder or file name without extension. +# E.g. `internal-project` references `content/project/internal-project/index.md`. +# Otherwise, set `projects: []`. +projects: [XAINES] + +# Slides (optional). +# Associate this publication with Markdown slides. +# Simply enter your slide deck's filename without extension. +# E.g. `slides: "example"` references `content/slides/example/index.md`. +# Otherwise, set `slides: ""`. +slides: "" +---