diff --git a/.gitmodules b/.gitmodules
index 0fedd4283e2..03e2b42b720 100755
--- a/.gitmodules
+++ b/.gitmodules
@@ -46,10 +46,6 @@
   path = _external/resources/wai-audiences
   url = https://github.com/w3c/wai-audiences.git
   branch = master
-[submodule "_external/resources/wai-about-wai"]
-  path = _external/resources/wai-about-wai
-  url = https://github.com/w3c/wai-about-wai.git
-  branch = master
 [submodule "_external/resources/wai-statements"]
   path = _external/resources/wai-statements
   url = https://github.com/w3c/wai-statements.git
diff --git a/_config.yml b/_config.yml
index 72c9e8d7d31..9ee4d4ccf3e 100755
--- a/_config.yml
+++ b/_config.yml
@@ -90,12 +90,6 @@ collections:
     permalink: /roles/:path/
     prevnext: false
     parent: "/"
-  about:
-    name: "About"
-    output: true
-    acknowledgements: false
-    permalink: /:collection/:path/
-    prevnext: false
   posts:
     output: true
     permalink: /news/:year-:month-:day/:title/
@@ -151,12 +145,6 @@
       layout: "policy"
       github:
         repository: w3c/wai-policies-prototype
-  - scope:
-      path: "_about"
-    values:
-      layout: "default"
-      github:
-        repository: w3c/wai-about-wai
   - scope:
       path: "_posts"
     values:
@@ -338,8 +326,6 @@
     scope:
       path: "_translations-sitemaps"
     values:
-      github:
-        repository: w3c/wai-website
      lang: en
      layout: translation-sitemap
      description: Help make the Web accessible to people with disabilities around the world. We appreciate your contributions to translating W3C WAI accessibility resources.
@@ -352,7 +338,6 @@
     margin: 0.2rem 0 0.4rem;
   }
 
-
 # include all collections here so they are searchable
 tipue_search:
   include:
@@ -365,10 +350,10 @@
     "tips",
     "policies",
     "audiences",
-    "about",
-    "objectives",
+    "objectives",
     "patterns",
-    "apg-examples"
+    "apg-examples",
+    "translations-sitemaps"
   ]
 
 # TODO docs say don't need to declare any used in the Theme
diff --git a/_external/resources/wai-about-wai b/_external/resources/wai-about-wai
deleted file mode 160000
index 1537418b95b..00000000000
--- a/_external/resources/wai-about-wai
+++ /dev/null
@@ -1 +0,0 @@
-Subproject commit 1537418b95b6eb85cd8f29ffad3140f77d4cf001
diff --git a/collections/_about b/collections/_about
deleted file mode 120000
index 108ee5c6725..00000000000
--- a/collections/_about
+++ /dev/null
@@ -1 +0,0 @@
-../_external/resources/wai-about-wai/_about/
\ No newline at end of file
diff --git a/content-images/about/eu.svg b/content-images/about/eu.svg
new file mode 100644
index 00000000000..754f1480fc7
--- /dev/null
+++ b/content-images/about/eu.svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/content-images/about/logo-OpenConf.png b/content-images/about/logo-OpenConf.png
new file mode 100644
index 00000000000..154da0b2417
Binary files /dev/null and b/content-images/about/logo-OpenConf.png differ
diff --git a/content-images/about/social-translations.png b/content-images/about/social-translations.png
new file mode 100644
index 00000000000..bff6d599a64
Binary files /dev/null and b/content-images/about/social-translations.png differ
diff --git a/content-images/about/translating/commit-changes.jpg b/content-images/about/translating/commit-changes.jpg
new file mode 100644
index 00000000000..d84b0969c03
Binary files /dev/null and b/content-images/about/translating/commit-changes.jpg differ
diff --git a/content-images/about/translating/compare-changes.jpg b/content-images/about/translating/compare-changes.jpg
new file mode 100644
index 00000000000..d73deb2b050
Binary files /dev/null and b/content-images/about/translating/compare-changes.jpg differ
diff --git a/content-images/about/translating/comparing-changes.jpg b/content-images/about/translating/comparing-changes.jpg
new file mode 100644
index 00000000000..2ae07867c59
Binary files /dev/null and b/content-images/about/translating/comparing-changes.jpg differ
diff --git a/content-images/about/translating/create-draft-pull-request.jpg b/content-images/about/translating/create-draft-pull-request.jpg
new file mode 100644
index 00000000000..f3177491a1e
Binary files /dev/null and b/content-images/about/translating/create-draft-pull-request.jpg differ
diff --git a/content-images/about/translating/draft-button.jpg b/content-images/about/translating/draft-button.jpg
new file mode 100644
index 00000000000..442e50d7141
Binary files /dev/null and b/content-images/about/translating/draft-button.jpg differ
diff --git a/content-images/about/translating/expand-file-tree.jpg b/content-images/about/translating/expand-file-tree.jpg
new file mode 100644
index 00000000000..584720566e1
Binary files /dev/null and b/content-images/about/translating/expand-file-tree.jpg differ
diff --git a/content-images/about/translating/fork-edit-link.jpg b/content-images/about/translating/fork-edit-link.jpg
new file mode 100644
index 00000000000..506fa09738c
Binary files /dev/null and b/content-images/about/translating/fork-edit-link.jpg differ
diff --git a/content-images/about/translating/fork-repo.jpg b/content-images/about/translating/fork-repo.jpg
new file mode 100644
index 00000000000..2d696ce9567
Binary files /dev/null and b/content-images/about/translating/fork-repo.jpg differ
diff --git a/content-images/about/translating/inkscape-export-png.jpg b/content-images/about/translating/inkscape-export-png.jpg
new file mode 100644
index 00000000000..07c33d4bba2
Binary files /dev/null and b/content-images/about/translating/inkscape-export-png.jpg differ
diff --git a/content-images/about/translating/inkscape-language-subtag.jpg b/content-images/about/translating/inkscape-language-subtag.jpg
new file mode 100644
index 00000000000..d7fdcd6b4a6
Binary files /dev/null and b/content-images/about/translating/inkscape-language-subtag.jpg differ
diff --git a/content-images/about/translating/inkscape-text-tool.jpg b/content-images/about/translating/inkscape-text-tool.jpg
new file mode 100644
index 00000000000..e8fa90f2b56
Binary files /dev/null and b/content-images/about/translating/inkscape-text-tool.jpg differ
diff --git a/content-images/about/translating/name-new-file.jpg b/content-images/about/translating/name-new-file.jpg
new file mode 100644
index 00000000000..77c49004ab5
Binary files /dev/null and b/content-images/about/translating/name-new-file.jpg differ
diff --git a/content-images/about/translating/new-file.jpg b/content-images/about/translating/new-file.jpg
new file mode 100644
index 00000000000..3d16821e7bf
Binary files /dev/null and b/content-images/about/translating/new-file.jpg differ
diff --git a/content-images/about/translating/preview-processing.jpg b/content-images/about/translating/preview-processing.jpg
new file mode 100644
index 00000000000..47444517714
Binary files /dev/null and b/content-images/about/translating/preview-processing.jpg differ
diff --git a/content-images/about/translating/preview-ready.jpg b/content-images/about/translating/preview-ready.jpg
new file mode 100644
index 00000000000..4442606868b
Binary files /dev/null and b/content-images/about/translating/preview-ready.jpg differ
diff --git a/content-images/about/translating/propose-changes.jpg b/content-images/about/translating/propose-changes.jpg
new
file mode 100644 index 00000000000..579f04eace9 Binary files /dev/null and b/content-images/about/translating/propose-changes.jpg differ diff --git a/content-images/about/translating/ready-for-review-button.jpg b/content-images/about/translating/ready-for-review-button.jpg new file mode 100644 index 00000000000..b3bd727856d Binary files /dev/null and b/content-images/about/translating/ready-for-review-button.jpg differ diff --git a/content-images/about/translating/ready-for-review-state.jpg b/content-images/about/translating/ready-for-review-state.jpg new file mode 100644 index 00000000000..546a0369d9c Binary files /dev/null and b/content-images/about/translating/ready-for-review-state.jpg differ diff --git a/content-images/about/update_social.png b/content-images/about/update_social.png new file mode 100644 index 00000000000..7f13df9b80a Binary files /dev/null and b/content-images/about/update_social.png differ diff --git a/content-images/about/wai-coop/Fig1.png b/content-images/about/wai-coop/Fig1.png new file mode 100644 index 00000000000..36b82554222 Binary files /dev/null and b/content-images/about/wai-coop/Fig1.png differ diff --git a/content-images/about/wai-coop/Fig2.png b/content-images/about/wai-coop/Fig2.png new file mode 100644 index 00000000000..ad7ff913765 Binary files /dev/null and b/content-images/about/wai-coop/Fig2.png differ diff --git a/content-images/about/wai-coop/fig1_sample_layouts.png b/content-images/about/wai-coop/fig1_sample_layouts.png new file mode 100644 index 00000000000..764e7d33087 Binary files /dev/null and b/content-images/about/wai-coop/fig1_sample_layouts.png differ diff --git a/content-images/about/wai-tools/video1.vtt b/content-images/about/wai-tools/video1.vtt new file mode 100644 index 00000000000..bdc8e78e3da --- /dev/null +++ b/content-images/about/wai-tools/video1.vtt @@ -0,0 +1,712 @@ +WEBVTT + +00:00:00.533 --> 00:00:01.533 align:middle line:79% position:50% size:58% +>> SHADI ABOU-ZAHRA: +Just a little bit about + +00:00:01.600 --> 00:00:03.133 align:middle line:85% position:50% size:48% +the project itself. + +00:00:03.200 --> 00:00:09.933 align:middle line:79% position:50% size:58% +WAI-Tools Project is +an acronym for Advanced + +00:00:10.000 --> 00:00:13.699 align:middle line:79% position:50% size:65% +Decision Support Tools for +Scalable Web Accessibility + +00:00:13.766 --> 00:00:16.133 align:middle line:79% position:50% size:63% +Assessments of the Web +Accessibility Initiative. + +00:00:16.199 --> 00:00:17.533 align:middle line:85% position:50% size:35% +So, WAI-Tools. + +00:00:17.600 --> 00:00:22.000 align:middle line:79% position:50% size:63% +So, it's a project of +the W3C Web Accessibility + +00:00:22.066 --> 00:00:25.533 align:middle line:79% position:50% size:60% +Initiative co-funded by +the European Commission, + +00:00:25.600 --> 00:00:29.133 align:middle line:79% position:50% size:53% +the EC, under the +Horizon 2020 program. + +00:00:29.199 --> 00:00:31.600 align:middle line:79% position:50% size:65% +It's a so-called +Innovation Action project. + +00:00:31.666 --> 00:00:34.200 align:middle line:79% position:50% size:65% +So there are different +types of projects that the + +00:00:34.266 --> 00:00:38.533 align:middle line:79% position:50% size:70% +European Commission supports +and this is one of them. + +00:00:38.600 --> 00:00:41.433 align:middle line:79% position:50% size:53% +It's under the line +of innovation action. 
+ +00:00:41.500 --> 00:00:46.333 align:middle line:79% position:50% size:53% +It started on the 1st +of November in 2017. + +00:00:46.399 --> 00:00:50.700 align:middle line:79% position:50% size:70% +It also - it already +feels like many decades ago. + +00:00:50.766 --> 00:00:53.666 align:middle line:79% position:50% size:65% +And unfortunately +actually, coming to an end + +00:00:53.733 --> 00:00:55.233 align:middle line:85% position:50% size:63% +at the end of this month. + +00:00:55.299 --> 00:00:58.100 align:middle line:79% position:50% size:70% +As I mentioned, it's +been a really great project, + +00:00:58.166 --> 00:01:01.633 align:middle line:79% position:50% size:45% +great fun working +with the partners. + +00:01:01.700 --> 00:01:04.766 align:middle line:79% position:50% size:60% +Speaking of the partners +of the project, the lead + +00:01:04.833 --> 00:01:08.133 align:middle line:79% position:50% size:58% +partner is the European +Research Consortium for + +00:01:08.200 --> 00:01:11.000 align:middle line:79% position:50% size:60% +Informatics and +Mathematics, ERCIM which + +00:01:11.066 --> 00:01:14.299 align:middle line:79% position:50% size:55% +is the +European host for W3C. + +00:01:14.366 --> 00:01:17.966 align:middle line:79% position:50% size:63% +So, this is the legal +footing of W3C in Europe. + +00:01:18.033 --> 00:01:20.866 align:middle line:79% position:50% size:58% +Other partners in the +project are Siteimprove + +00:01:20.933 --> 00:01:23.866 align:middle line:79% position:50% size:60% +from Denmark, the +Accessibility Foundation + +00:01:23.933 --> 00:01:27.233 align:middle line:79% position:50% size:60% +in the Netherlands, the +Norwegian Digitalisation + +00:01:27.299 --> 00:01:32.166 align:middle line:79% position:50% size:48% +Agency, Digdir, and +the Administrative + +00:01:32.233 --> 00:01:34.666 align:middle line:79% position:50% size:60% +Modernization +Agency in Portugal, AMA. + +00:01:34.733 --> 00:01:40.200 align:middle line:79% position:50% size:63% +The University of Lisbon, +FCID, in Portugal, and + +00:01:40.266 --> 00:01:43.700 align:middle line:79% position:50% size:63% +Deque Research which is +based in the Netherlands. + +00:01:43.766 --> 00:01:48.666 align:middle line:79% position:50% size:63% +So, these are the project +partners who together are + +00:01:48.733 --> 00:01:51.099 align:middle line:79% position:50% size:68% +responsible for moving +this forward, this project, + +00:01:51.166 --> 00:01:54.299 align:middle line:79% position:50% size:55% +but really a lot of +this work is happening + +00:01:54.366 --> 00:01:58.333 align:middle line:79% position:50% size:63% +or happened in - directly +in open W3C groups and + +00:01:58.400 --> 00:02:01.166 align:middle line:79% position:50% size:65% +we're going to be talking +more about that, about how + +00:02:01.233 --> 00:02:04.633 align:middle line:79% position:50% size:60% +the project work has +happened and how you can + +00:02:04.700 --> 00:02:07.366 align:middle line:79% position:50% size:65% +still contribute to some +of this project work which + +00:02:07.433 --> 00:02:11.366 align:middle line:79% position:50% size:65% +is going to continue +happening in these groups. 
+ +00:02:11.433 --> 00:02:13.433 align:middle line:79% position:50% size:50% +But just to give you +a little bit of the + +00:02:13.500 --> 00:02:16.233 align:middle line:79% position:50% size:65% +objectives of the project, +what did we really want to + +00:02:16.300 --> 00:02:18.066 align:middle line:85% position:50% size:63% +achieve with the project? + +00:02:18.133 --> 00:02:23.166 align:middle line:79% position:50% size:65% +One of the main objectives +here on the screen is an + +00:02:23.233 --> 00:02:25.500 align:middle line:79% position:50% size:53% +illustration where at +the very top of the + +00:02:25.566 --> 00:02:28.333 align:middle line:79% position:50% size:53% +illustration is kind +of the ultimate goal. + +00:02:28.400 --> 00:02:31.466 align:middle line:79% position:50% size:65% +We all want +compatible results, right? + +00:02:31.533 --> 00:02:33.900 align:middle line:79% position:50% size:55% +When we are evaluating +for the Web Content + +00:02:33.966 --> 00:02:36.300 align:middle line:79% position:50% size:63% +Accessibility Guidelines +which is the layer below, + +00:02:36.366 --> 00:02:38.833 align:middle line:79% position:50% size:60% +we're evaluating for +accessibility or testing + +00:02:38.900 --> 00:02:41.400 align:middle line:79% position:50% size:53% +for accessibility, +we're using different + +00:02:41.466 --> 00:02:43.833 align:middle line:79% position:50% size:45% +methodologies, so +different methods. + +00:02:43.900 --> 00:02:46.166 align:middle line:79% position:50% size:65% +But at the end of the day, +we're testing for the same + +00:02:46.233 --> 00:02:50.266 align:middle line:79% position:50% size:78% +set of guidelines and we +want to get compatible results. + +00:02:50.333 --> 00:02:52.966 align:middle line:79% position:50% size:73% +We want to get the same +results when we test the same + +00:02:53.033 --> 00:02:56.666 align:middle line:79% position:50% size:65% +things, right, no matter +how we actually test them. + +00:02:58.800 --> 00:03:01.433 align:middle line:79% position:50% size:63% +And so, this was really +the focus of the project, + +00:03:01.500 --> 00:03:04.266 align:middle line:79% position:50% size:50% +to look at what we +call ACT test rules, + +00:03:04.333 --> 00:03:07.733 align:middle line:79% position:50% size:65% +Accessibility +Conformance Testing rules. + +00:03:07.800 --> 00:03:11.766 align:middle line:79% position:50% size:50% +The point is these +are very, very small + +00:03:11.833 --> 00:03:14.699 align:middle line:79% position:50% size:55% +procedures for testing +that can be combined + +00:03:14.766 --> 00:03:16.966 align:middle line:79% position:50% size:60% +together for +different methodologies. + +00:03:17.033 --> 00:03:19.733 align:middle line:79% position:50% size:53% +And methodologies +here is a broad term. + +00:03:19.800 --> 00:03:23.000 align:middle line:79% position:50% size:53% +It could be automated +tools or anything. + +00:03:23.066 --> 00:03:26.400 align:middle line:79% position:50% size:58% +Any procedure that +you follow for manually + +00:03:26.466 --> 00:03:29.033 align:middle line:79% position:50% size:50% +checking, this is a +methodology as well. + +00:03:29.099 --> 00:03:31.266 align:middle line:79% position:50% size:65% +So, really, anything +that encodes how you check + +00:03:31.333 --> 00:03:33.433 align:middle line:79% position:50% size:43% +accessibility +is a methodology. + +00:03:33.500 --> 00:03:34.333 align:middle line:85% position:50% size:48% +It could be manual. 
+ +00:03:34.400 --> 00:03:35.433 align:middle line:85% position:50% size:55% +It could be automated. + +00:03:35.500 --> 00:03:39.933 align:middle line:79% position:50% size:80% +It could be a combination, +semi-automated, so a combination + +00:03:40.000 --> 00:03:44.333 align:middle line:79% position:50% size:75% +of automated and non-automated +approaches together. + +00:03:44.400 --> 00:03:48.066 align:middle line:79% position:50% size:63% +So, if we have very small +building blocks, and they + +00:03:48.133 --> 00:03:51.800 align:middle line:79% position:50% size:65% +are depicted here as these +small boxes, these small + +00:03:51.866 --> 00:03:55.400 align:middle line:79% position:50% size:65% +squares, very, very small, +like Lego pieces, we can + +00:03:55.466 --> 00:03:58.900 align:middle line:79% position:50% size:55% +combine these together +to provide different + +00:03:58.966 --> 00:04:02.699 align:middle line:79% position:50% size:65% +methodologies but at the +end of the day, if we test + +00:04:02.766 --> 00:04:05.533 align:middle line:79% position:50% size:58% +the same thing, we want +to get the same result. + +00:04:05.599 --> 00:04:08.766 align:middle line:79% position:50% size:65% +So, that's really the +objective or the vision of + +00:04:08.833 --> 00:04:11.500 align:middle line:79% position:50% size:65% +the project is to try to +get consistency because we + +00:04:11.566 --> 00:04:14.900 align:middle line:79% position:50% size:60% +know that one of the big +issues of accessibility + +00:04:14.966 --> 00:04:17.933 align:middle line:79% position:50% size:65% +right now is that we don't +have that consistency. + +00:04:18.000 --> 00:04:20.566 align:middle line:79% position:50% size:58% +We have different tools +that provide different + +00:04:20.633 --> 00:04:23.199 align:middle line:79% position:50% size:50% +results or different +evaluation methods. + +00:04:23.266 --> 00:04:26.100 align:middle line:79% position:50% size:65% +So, you go to one person +and you get one result and + +00:04:26.166 --> 00:04:28.566 align:middle line:79% position:50% size:65% +then sometimes you can get +slightly different results + +00:04:28.633 --> 00:04:30.899 align:middle line:79% position:50% size:65% +when you go to a different +person to get it checked. + +00:04:30.966 --> 00:04:33.433 align:middle line:79% position:50% size:60% +That's a big problem for +accessibility that we're + +00:04:33.500 --> 00:04:36.466 align:middle line:79% position:50% size:80% +trying to address to have +more consistency of how we test. + +00:04:39.000 --> 00:04:43.500 align:middle line:79% position:50% size:65% +Another objective of the +project, by defining these + +00:04:43.566 --> 00:04:45.933 align:middle line:79% position:50% size:60% +checks and finally +agreeing on them and not + +00:04:46.000 --> 00:04:49.733 align:middle line:79% position:50% size:53% +having to constantly +re-repeat how we test + +00:04:49.800 --> 00:04:53.266 align:middle line:79% position:50% size:63% +certain things, how do we +test the text alternative + +00:04:53.333 --> 00:04:55.199 align:middle line:79% position:50% size:38% +or how do we +test the label. + +00:04:55.266 --> 00:04:57.833 align:middle line:79% position:50% size:60% +If we can finally decide +and agree on a common + +00:04:57.899 --> 00:05:00.433 align:middle line:79% position:50% size:55% +interpretation for +these checks, maybe we + +00:05:00.500 --> 00:05:02.933 align:middle line:79% position:50% size:48% +can increase +automation in that. 
+ +00:05:03.000 --> 00:05:06.000 align:middle line:79% position:50% size:58% +Right now, a very large +part of accessibility + +00:05:06.066 --> 00:05:07.300 align:middle line:79% position:50% size:40% +needs to be +tested manually. + +00:05:07.366 --> 00:05:10.466 align:middle line:79% position:50% size:75% +But we also know that +there is a lot of advancements + +00:05:10.533 --> 00:05:12.566 align:middle line:85% position:50% size:70% +happening in the technology. + +00:05:12.633 --> 00:05:16.633 align:middle line:79% position:50% size:58% +Buzzwords, artificial +intelligence or machine + +00:05:16.699 --> 00:05:19.566 align:middle line:79% position:50% size:50% +learning, natural +language processing. + +00:05:19.633 --> 00:05:23.466 align:middle line:79% position:50% size:65% +All of these technologies +are evolving, all of these + +00:05:23.533 --> 00:05:25.533 align:middle line:79% position:50% size:58% +innovations that would +essentially allow us to + +00:05:25.600 --> 00:05:27.766 align:middle line:79% position:50% size:50% +increase the +level of automation. + +00:05:27.833 --> 00:05:29.600 align:middle line:79% position:50% size:53% +It doesn't mean that +accessibility will be + +00:05:29.666 --> 00:05:32.800 align:middle line:79% position:50% size:60% +fully automatable, at +least in the foreseeable + +00:05:32.866 --> 00:05:36.500 align:middle line:79% position:50% size:58% +future, but at least we +can maybe increase the + +00:05:36.566 --> 00:05:40.266 align:middle line:79% position:50% size:55% +amount of tool support +which would be - would + +00:05:40.333 --> 00:05:42.666 align:middle line:79% position:50% size:58% +make things much more +effective and much more + +00:05:42.733 --> 00:05:47.633 align:middle line:79% position:50% size:70% +efficient if we can increase +this level of automation. + +00:05:47.699 --> 00:05:50.100 align:middle line:79% position:50% size:60% +So, these are really the +objectives or the core + +00:05:50.166 --> 00:05:51.699 align:middle line:85% position:50% size:55% +pieces of the project. + +00:05:51.766 --> 00:05:54.800 align:middle line:79% position:50% size:65% +The deliverables of the +project, which we're going + +00:05:54.866 --> 00:05:57.533 align:middle line:79% position:50% size:63% +to go into more detail, +but hopefully from a more + +00:05:57.600 --> 00:06:01.766 align:middle line:79% position:50% size:63% +applied side, is the main +part of the project, the + +00:06:01.833 --> 00:06:05.899 align:middle line:79% position:50% size:63% +heart piece is to develop +Accessibility Conformance + +00:06:05.966 --> 00:06:07.633 align:middle line:85% position:50% size:53% +Testing rules, right. + +00:06:07.699 --> 00:06:10.100 align:middle line:79% position:50% size:60% +So, this is to create an +initial set hopefully to + +00:06:10.166 --> 00:06:14.066 align:middle line:79% position:50% size:60% +get the work in the +community kickstarted to + +00:06:14.133 --> 00:06:16.466 align:middle line:79% position:50% size:53% +help establish an +active community that + +00:06:16.533 --> 00:06:20.133 align:middle line:79% position:50% size:48% +will sustain +beyond the project. + +00:06:20.199 --> 00:06:24.866 align:middle line:79% position:50% size:65% +So, this is a call on - +call for action for you to + +00:06:24.933 --> 00:06:27.333 align:middle line:79% position:50% size:58% +get involved in this +work that will continue + +00:06:27.399 --> 00:06:29.466 align:middle line:85% position:50% size:48% +beyond the project. 
+ +00:06:29.533 --> 00:06:33.600 align:middle line:79% position:50% size:65% +We implemented these +rules that we developed in + +00:06:33.666 --> 00:06:36.300 align:middle line:79% position:50% size:58% +different tools from +project partners in the + +00:06:36.366 --> 00:06:41.100 align:middle line:79% position:50% size:65% +project, here specifically +the partners Siteimprove + +00:06:41.166 --> 00:06:47.466 align:middle line:79% position:50% size:58% +in their checker called +Alfa, in Deque in their + +00:06:47.533 --> 00:06:52.166 align:middle line:79% position:50% size:60% +checker called Axe-core, +and in - a tool from the + +00:06:52.233 --> 00:06:56.300 align:middle line:79% position:50% size:63% +University of Lisbon in +a checker called QualWeb. + +00:06:56.366 --> 00:06:59.933 align:middle line:79% position:50% size:75% +And we also validated these +rules in real-life situations. + +00:07:00.000 --> 00:07:02.833 align:middle line:79% position:50% size:60% +We had a set of websites +that we took and that we + +00:07:02.899 --> 00:07:05.600 align:middle line:85% position:50% size:58% +ran these rules across. + +00:07:05.666 --> 00:07:09.133 align:middle line:79% position:50% size:65% +Now, these rules were +applied in different ways, + +00:07:09.199 --> 00:07:13.800 align:middle line:79% position:50% size:65% +were test-driven, or were +taken up by the Portuguese + +00:07:13.866 --> 00:07:15.333 align:middle line:79% position:50% size:35% +and Norwegian +observatories. + +00:07:15.399 --> 00:07:18.133 align:middle line:79% position:50% size:63% +These were part of the +project to kind of really + +00:07:18.199 --> 00:07:22.600 align:middle line:79% position:50% size:68% +ground us and look at when +entire countries want to do + +00:07:22.666 --> 00:07:25.666 align:middle line:79% position:50% size:55% +large scale monitoring +of accessibility, what + +00:07:25.733 --> 00:07:26.433 align:middle line:85% position:50% size:40% +are their needs? + +00:07:26.500 --> 00:07:27.600 align:middle line:79% position:50% size:40% +What can we +learn from that? + +00:07:27.666 --> 00:07:30.666 align:middle line:79% position:50% size:63% +And how can we provide +them with different types + +00:07:30.733 --> 00:07:33.733 align:middle line:79% position:50% size:50% +of rules that +will support getting + +00:07:33.800 --> 00:07:36.633 align:middle line:85% position:50% size:65% +them valuable information? + +00:07:36.699 --> 00:07:41.899 align:middle line:79% position:50% size:73% +We looked at improving the +existing WCAG-EM Report Tool. + +00:07:41.966 --> 00:07:44.199 align:middle line:79% position:50% size:53% +So, people +might know that tool. + +00:07:44.266 --> 00:07:45.933 align:middle line:85% position:50% size:48% +It already existed. + +00:07:46.000 --> 00:07:49.333 align:middle line:79% position:50% size:58% +WCAG-EM stands for WCAG +evaluation methodology. + +00:07:49.399 --> 00:07:51.100 align:middle line:79% position:50% size:50% +It actually has a +longer name, Website + +00:07:51.166 --> 00:07:54.533 align:middle line:79% position:50% size:63% +Accessibility Conformance +Evaluation Methodology. + +00:07:54.600 --> 00:07:55.600 align:middle line:85% position:50% size:43% +It's a long name. + +00:07:55.666 --> 00:07:59.666 align:middle line:79% position:50% size:63% +But this is an evaluation +methodology for WCAG that + +00:07:59.733 --> 00:08:02.600 align:middle line:79% position:50% size:53% +can be used to +test entire websites. 
+ +00:08:02.666 --> 00:08:05.399 align:middle line:79% position:50% size:63% +And it was a tool to help +you walk through that and + +00:08:05.466 --> 00:08:08.100 align:middle line:79% position:50% size:65% +now we have added to that +tool an import function so + +00:08:08.166 --> 00:08:11.333 align:middle line:79% position:50% size:60% +that you can import test +results from automated + +00:08:11.399 --> 00:08:14.166 align:middle line:79% position:50% size:63% +tools, again here to +show the idea that we can + +00:08:14.233 --> 00:08:18.100 align:middle line:79% position:50% size:58% +combine automated and +manual when we're using + +00:08:18.166 --> 00:08:21.566 align:middle line:85% position:50% size:50% +open standards here. + +00:08:21.633 --> 00:08:26.566 align:middle line:79% position:50% size:80% +We also created an accessibility +statements generator tool. + +00:08:26.633 --> 00:08:30.433 align:middle line:79% position:50% size:63% +So, this helps you create +accessibility statements + +00:08:30.500 --> 00:08:33.500 align:middle line:79% position:50% size:63% +for your website once you +have done the testing to + +00:08:33.566 --> 00:08:37.166 align:middle line:79% position:50% size:60% +explain to the world the +accessibility that you + +00:08:37.233 --> 00:08:39.366 align:middle line:85% position:50% size:53% +have on your website. + +00:08:39.433 --> 00:08:44.500 align:middle line:79% position:50% size:63% +And last but not least, a +demo, a proof of concept + +00:08:44.566 --> 00:08:47.466 align:middle line:79% position:50% size:58% +to show, to demonstrate +the potential of open + +00:08:47.533 --> 00:08:49.700 align:middle line:79% position:50% size:45% +linked data +for accessibility. + +00:08:49.766 --> 00:08:54.600 align:middle line:79% position:50% size:63% +So, what if - right - +what if we had these test + +00:08:54.666 --> 00:08:59.100 align:middle line:79% position:50% size:60% +results being published, +at least those by public + +00:08:59.166 --> 00:09:01.200 align:middle line:79% position:50% size:65% +bodies, and we could +connect these and we could + +00:09:01.266 --> 00:09:03.600 align:middle line:79% position:50% size:63% +scrape these and we +could do a more active or + +00:09:03.666 --> 00:09:07.533 align:middle line:79% position:50% size:50% +decentralized +monitoring approach? + +00:09:07.600 --> 00:09:09.399 align:middle line:79% position:50% size:53% +So, these are the +project deliverables. + +00:09:09.466 --> 00:09:12.733 align:middle line:79% position:50% size:45% +More information +is on the website. + +00:09:12.799 --> 00:09:17.366 align:middle line:79% position:50% size:63% +We will provide you these +links at the end again so + +00:09:17.433 --> 00:09:20.766 align:middle line:79% position:50% size:58% +you don't need to start +looking at the links. + +00:09:20.833 --> 00:09:27.100 align:middle line:79% position:50% size:43% +The address is +w3.org/WAI/Tools. + +00:09:27.166 --> 00:09:30.000 align:middle line:79% position:50% size:63% +That is the project page +for the WAI-Tools Project + +00:09:30.066 --> 00:09:32.666 align:middle line:79% position:50% size:60% +and you can find here +further information, all + +00:09:32.733 --> 00:09:36.000 align:middle line:79% position:50% size:58% +the resources, all the +deliverables, including + +00:09:36.066 --> 00:09:39.600 align:middle line:79% position:50% size:65% +this open meeting with +the slides online as well. 
diff --git a/content-images/about/wai-tools/video2.vtt b/content-images/about/wai-tools/video2.vtt new file mode 100644 index 00000000000..237b84878bd --- /dev/null +++ b/content-images/about/wai-tools/video2.vtt @@ -0,0 +1,2984 @@ +WEBVTT + +00:00:00.066 --> 00:00:01.533 align:middle line:79% position:50% size:58% +>> SHADI ABOU-ZAHRA: +So, let's move directly + +00:00:01.600 --> 00:00:04.299 align:middle line:79% position:50% size:53% +to our first session +on the development of + +00:00:04.366 --> 00:00:06.900 align:middle line:79% position:50% size:65% +Accessibility +Conformance Testing rules. + +00:00:06.966 --> 00:00:12.500 align:middle line:79% position:50% size:65% +I want to ask on the +virtual stage, Wilco Fiers + +00:00:12.566 --> 00:00:19.666 align:middle line:79% position:50% size:70% +from Deque and Kasper Isager +from Siteimprove and Carlos + +00:00:19.733 --> 00:00:24.133 align:middle line:79% position:50% size:65% +Duarte - Carlos Duarte +from University of Lisbon. + +00:00:24.199 --> 00:00:27.266 align:middle line:79% position:50% size:63% +If the three of you could +just briefly - Wilco can + +00:00:27.333 --> 00:00:29.266 align:middle line:79% position:50% size:63% +you say a few words about +yourself, just introduce + +00:00:29.333 --> 00:00:30.966 align:middle line:85% position:50% size:63% +yourself to the audience. + +00:00:31.033 --> 00:00:32.200 align:middle line:79% position:50% size:38% +>> WILCO FIERS: +I sure can. + +00:00:32.266 --> 00:00:34.899 align:middle line:79% position:50% size:40% +Can you guys +hear me alright? + +00:00:34.966 --> 00:00:36.233 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Yep, all fine. + +00:00:36.299 --> 00:00:37.066 align:middle line:79% position:50% size:38% +>> WILCO FIERS: +Fantastic. + +00:00:37.133 --> 00:00:38.799 align:middle line:79% position:50% size:50% +Alright, so my +name is Wilco Fiers. + +00:00:38.866 --> 00:00:40.899 align:middle line:85% position:50% size:63% +I work for Deque Systems. + +00:00:40.966 --> 00:00:43.833 align:middle line:79% position:50% size:50% +I am a product owner +and project manager. + +00:00:43.899 --> 00:00:48.166 align:middle line:79% position:50% size:73% +There, I am responsible +for Axe-core which is Deque's + +00:00:48.233 --> 00:00:52.833 align:middle line:79% position:50% size:73% +open source accessibility +engine which Shadi mentioned. + +00:00:52.899 --> 00:00:56.366 align:middle line:79% position:50% size:58% +In addition to that, +I spend part of my time + +00:00:56.433 --> 00:00:58.266 align:middle line:85% position:50% size:45% +on W3C activities. + +00:00:58.333 --> 00:01:04.566 align:middle line:79% position:50% size:63% +I am the chair of the ACT +Rules Community Group, a + +00:01:04.633 --> 00:01:08.966 align:middle line:79% position:50% size:60% +facilitator on the ACT +Task Force, as well as a + +00:01:09.033 --> 00:01:13.000 align:middle line:79% position:50% size:50% +member of the Silver +Task Force which is + +00:01:13.066 --> 00:01:17.200 align:middle line:85% position:50% size:70% +currently developing WCAG 3. + +00:01:17.266 --> 00:01:18.566 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Thank you, Wilco. 
+ +00:01:18.633 --> 00:01:21.700 align:middle line:79% position:50% size:63% +And all these groups that +Wilco mentioned, the task + +00:01:21.766 --> 00:01:23.933 align:middle line:79% position:50% size:53% +force and community +group and so on, will + +00:01:24.000 --> 00:01:25.766 align:middle line:79% position:50% size:45% +be explained in +this presentation. + +00:01:25.833 --> 00:01:27.500 align:middle line:79% position:50% size:45% +>> WILCO FIERS: +Don't worry about. + +00:01:27.566 --> 00:01:28.233 align:middle line:85% position:50% size:33% +We'll get it. + +00:01:28.299 --> 00:01:30.233 align:middle line:85% position:50% size:40% +We'll get to it. + +00:01:30.299 --> 00:01:31.566 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +We'll get there. + +00:01:31.633 --> 00:01:32.233 align:middle line:85% position:50% size:38% +Piece by piece. + +00:01:32.299 --> 00:01:33.666 align:middle line:85% position:50% size:60% +We love acronyms in W3C. + +00:01:33.733 --> 00:01:36.733 align:middle line:79% position:50% size:78% +>> WILCO FIERS: I have +too many hats also, to be fair. + +00:01:36.799 --> 00:01:40.599 align:middle line:79% position:50% size:70% +>> SHADI ABOU-ZAHRA: Kasper, +please introduce yourself. + +00:01:40.666 --> 00:01:41.933 align:middle line:79% position:50% size:65% +>> KASPER ISAGER: +Yes, I will make it short. + +00:01:42.000 --> 00:01:43.466 align:middle line:85% position:50% size:65% +I don't have as many hats. + +00:01:43.533 --> 00:01:46.700 align:middle line:79% position:50% size:63% +My name is Kasper Isager +and I work as the product + +00:01:46.766 --> 00:01:49.299 align:middle line:79% position:50% size:38% +owner of Alfa +at Siteimprove. + +00:01:49.366 --> 00:01:52.033 align:middle line:79% position:50% size:43% +And then Alfa is +our accessibility + +00:01:52.099 --> 00:01:54.099 align:middle line:85% position:50% size:60% +conformance test engine. + +00:01:56.500 --> 00:01:57.099 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Welcome. + +00:01:57.166 --> 00:02:01.566 align:middle line:79% position:50% size:53% +Well, you don't +need to be so humble. + +00:02:01.633 --> 00:02:05.433 align:middle line:79% position:50% size:65% +Kasper is a mastermind of +a lot of the accessibility + +00:02:05.500 --> 00:02:10.500 align:middle line:79% position:50% size:73% +engines at +Siteimprove as far as I know. + +00:02:14.166 --> 00:02:16.199 align:middle line:79% position:50% size:55% +>> CARLOS DUARTE: +Hi, I'm Carlos Duarte. + +00:02:16.266 --> 00:02:19.400 align:middle line:79% position:50% size:65% +I teach - well my main +responsibility is to teach + +00:02:19.466 --> 00:02:23.933 align:middle line:79% position:50% size:75% +computer science at the +University of Lisbon, although + +00:02:24.000 --> 00:02:28.166 align:middle line:79% position:50% size:63% +my research is focused a +lot on web accessibility. + +00:02:28.233 --> 00:02:31.433 align:middle line:79% position:50% size:70% +And so, as part of my +responsibilities in teaching + +00:02:31.500 --> 00:02:35.699 align:middle line:79% position:50% size:73% +computer science, I also +teach web accessibility to my + +00:02:35.766 --> 00:02:40.599 align:middle line:79% position:50% size:50% +students and I also +have some more hats. 
+ +00:02:40.666 --> 00:02:46.033 align:middle line:79% position:50% size:58% +Together with Wilco, +I'm co-chairing the ACT + +00:02:46.099 --> 00:02:52.066 align:middle line:79% position:50% size:60% +Community Group and also +another hat for the W3C + +00:02:52.133 --> 00:02:53.900 align:middle line:79% position:50% size:58% +where I do work for the +Education and Outreach + +00:02:53.966 --> 00:02:59.966 align:middle line:79% position:50% size:68% +Working Group, specifically +on preparing the new - the + +00:03:00.033 --> 00:03:02.966 align:middle line:79% position:50% size:60% +curricula, the new web +accessibility curricula. + +00:03:05.900 --> 00:03:07.166 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Thank you, Carlos. + +00:03:07.233 --> 00:03:09.500 align:middle line:85% position:50% size:50% +And Wilco, go ahead. + +00:03:09.566 --> 00:03:13.000 align:middle line:79% position:50% size:55% +Take it away please +with the presentation. + +00:03:18.566 --> 00:03:19.833 align:middle line:79% position:50% size:35% +And this time +you are muted. + +00:03:19.900 --> 00:03:21.466 align:middle line:79% position:50% size:40% +>> WILCO FIERS: +I should unmute. + +00:03:21.533 --> 00:03:22.866 align:middle line:79% position:50% size:45% +Do you mind going +to the next slide? + +00:03:28.166 --> 00:03:29.566 align:middle line:85% position:50% size:50% +Thank you very much. + +00:03:29.633 --> 00:03:35.333 align:middle line:79% position:50% size:75% +So, I just wanted to get into +the goals of ACT really quick. + +00:03:35.400 --> 00:03:37.566 align:middle line:79% position:50% size:43% +Again, ACT stands +for Accessibility + +00:03:37.633 --> 00:03:39.300 align:middle line:85% position:50% size:50% +Conformance Testing. + +00:03:39.366 --> 00:03:42.733 align:middle line:79% position:50% size:68% +The project wasn't +setup as part of WAI-Tools. + +00:03:42.800 --> 00:03:46.300 align:middle line:79% position:50% size:43% +It predates it +by several years. + +00:03:46.366 --> 00:03:51.066 align:middle line:79% position:50% size:65% +Some of you may also know +it as Auto-WCAG which is a + +00:03:51.133 --> 00:03:57.199 align:middle line:79% position:50% size:78% +group that later got +renamed to the ACT Rules Group. + +00:04:00.300 --> 00:04:03.633 align:middle line:79% position:50% size:68% +And the reason that +group was started off is to + +00:04:03.699 --> 00:04:07.199 align:middle line:79% position:50% size:55% +improve consistency in +accessibility testing. + +00:04:07.266 --> 00:04:10.833 align:middle line:79% position:50% size:65% +So, one of the greater +challenges Shadi mentioned + +00:04:10.900 --> 00:04:13.199 align:middle line:85% position:50% size:55% +is consistent testing. + +00:04:13.266 --> 00:04:17.466 align:middle line:79% position:50% size:68% +You do want, when you +get a report from somebody, + +00:04:17.533 --> 00:04:22.466 align:middle line:79% position:50% size:53% +for that report to be +accurate and based on + +00:04:22.533 --> 00:04:25.000 align:middle line:85% position:50% size:48% +well-founded ideas. + +00:04:25.066 --> 00:04:29.233 align:middle line:79% position:50% size:65% +Otherwise, you might +fix something and you have + +00:04:29.300 --> 00:04:32.466 align:middle line:79% position:50% size:73% +somebody else take a look at +it and they find more things. 
+ +00:04:32.533 --> 00:04:35.533 align:middle line:79% position:50% size:60% +Or they may find +completely contradicting + +00:04:35.600 --> 00:04:40.500 align:middle line:79% position:50% size:48% +things which can be +fairly frustrating. + +00:04:40.566 --> 00:04:44.399 align:middle line:79% position:50% size:63% +If you don't have a clear +definition of when your + +00:04:44.466 --> 00:04:48.300 align:middle line:79% position:50% size:58% +project is sufficiently +accessible, that makes + +00:04:48.366 --> 00:04:51.966 align:middle line:85% position:50% size:43% +life pretty hard. + +00:04:52.033 --> 00:04:58.000 align:middle line:79% position:50% size:63% +This kind of gets +exacerbated when you look + +00:04:58.066 --> 00:05:01.800 align:middle line:79% position:50% size:65% +at something like European +Union which is starting to + +00:05:01.866 --> 00:05:07.766 align:middle line:79% position:50% size:65% +track the accessibility of +government websites across + +00:05:07.833 --> 00:05:09.466 align:middle line:85% position:50% size:50% +the whole of Europe. + +00:05:09.533 --> 00:05:12.433 align:middle line:79% position:50% size:68% +If Denmark tests things +in a different way than the + +00:05:12.500 --> 00:05:15.399 align:middle line:79% position:50% size:65% +Netherlands does, what +you'll end up with is with + +00:05:15.466 --> 00:05:20.033 align:middle line:79% position:50% size:58% +results that don't - +aren't very comparable. + +00:05:20.100 --> 00:05:25.033 align:middle line:79% position:50% size:65% +Even worse, you might have +a website that is relevant + +00:05:25.100 --> 00:05:27.733 align:middle line:79% position:50% size:63% +in multiple countries and +if they used different + +00:05:27.800 --> 00:05:30.466 align:middle line:79% position:50% size:60% +testing methodologies to +gauge whether or not you + +00:05:30.533 --> 00:05:33.533 align:middle line:79% position:50% size:63% +complied with the law - +because that's what we're + +00:05:33.600 --> 00:05:37.866 align:middle line:79% position:50% size:68% +getting to at a certain +point; it should be legally + +00:05:37.933 --> 00:05:40.766 align:middle line:79% position:50% size:58% +required to meet these +standards - if they are + +00:05:40.833 --> 00:05:45.300 align:middle line:79% position:50% size:65% +testing along different +methods, you may find that + +00:05:45.366 --> 00:05:49.066 align:middle line:79% position:50% size:70% +your website complies in one +country but not in the other + +00:05:49.133 --> 00:05:53.500 align:middle line:79% position:50% size:73% +even though you're testing +along the same WCAG standard. + +00:05:53.566 --> 00:05:56.566 align:middle line:79% position:50% size:53% +So, that's really the +issue and the problem + +00:05:56.633 --> 00:05:57.933 align:middle line:85% position:50% size:45% +isn't too extreme. + +00:05:58.000 --> 00:06:00.833 align:middle line:79% position:50% size:63% +Like, it's not like WCAG +is all over the place and + +00:06:00.899 --> 00:06:04.899 align:middle line:85% position:50% size:50% +results go anywhere. + +00:06:04.966 --> 00:06:11.466 align:middle line:79% position:50% size:68% +But there are differences +there and it kind of erodes + +00:06:11.533 --> 00:06:15.000 align:middle line:79% position:50% size:68% +the trust that you can +get from results like that. + +00:06:15.066 --> 00:06:20.566 align:middle line:79% position:50% size:50% +So, that is what +ACT was setup to do. 
+ +00:06:20.633 --> 00:06:25.600 align:middle line:79% position:50% size:63% +And our goal there is, as +I mentioned, we started + +00:06:25.666 --> 00:06:29.199 align:middle line:79% position:50% size:65% +first off with Auto-WCAG +which is really focused on + +00:06:29.266 --> 00:06:31.100 align:middle line:85% position:50% size:65% +doing this for automation. + +00:06:31.166 --> 00:06:35.933 align:middle line:79% position:50% size:63% +But then we broadened +it out to address testing + +00:06:36.000 --> 00:06:37.800 align:middle line:85% position:50% size:55% +methodologies as well. + +00:06:37.866 --> 00:06:40.433 align:middle line:79% position:50% size:63% +And testing methodology +is not a well-established + +00:06:40.500 --> 00:06:44.500 align:middle line:79% position:50% size:58% +term but if you ask +around in accessibility + +00:06:44.566 --> 00:06:47.833 align:middle line:79% position:50% size:60% +institutions around the +world, most of them have + +00:06:47.899 --> 00:06:52.633 align:middle line:79% position:50% size:65% +some sort of document that +tells you how to interpret + +00:06:52.699 --> 00:06:55.833 align:middle line:79% position:50% size:70% +WCAG, like what you do +with this particular element + +00:06:55.899 --> 00:06:59.466 align:middle line:79% position:50% size:73% +on a page and with that +particular element on a page. + +00:06:59.533 --> 00:07:04.166 align:middle line:79% position:50% size:78% +One of the great things +about the kind of accessibility + +00:07:04.233 --> 00:07:11.033 align:middle line:79% position:50% size:68% +guidelines is that it was +written in a way that it is + +00:07:11.100 --> 00:07:14.466 align:middle line:79% position:50% size:70% +technology agnostic so that +it can live for a long time, + +00:07:14.533 --> 00:07:18.633 align:middle line:79% position:50% size:70% +so that it isn't just +applicable to one particular + +00:07:18.699 --> 00:07:21.966 align:middle line:79% position:50% size:73% +version of the web standards, +but it should be applicable + +00:07:22.033 --> 00:07:24.000 align:middle line:85% position:50% size:38% +to all of them. + +00:07:24.066 --> 00:07:26.733 align:middle line:79% position:50% size:63% +But that also means there +is some vagueness there + +00:07:26.800 --> 00:07:30.800 align:middle line:79% position:50% size:50% +that does require +that interpretation. + +00:07:30.866 --> 00:07:33.666 align:middle line:79% position:50% size:63% +So those interpretations, +those are what we are + +00:07:33.733 --> 00:07:38.199 align:middle line:79% position:50% size:65% +trying to capture with the +ACT Rules and so this is + +00:07:38.266 --> 00:07:42.633 align:middle line:79% position:50% size:65% +applicable really to lots +and lots of organizations. + +00:07:42.699 --> 00:07:44.699 align:middle line:85% position:50% size:45% +Next slide please. + +00:07:49.866 --> 00:07:53.933 align:middle line:79% position:50% size:68% +So, we've developed this +rules format, the ACT Rules + +00:07:54.000 --> 00:07:59.899 align:middle line:79% position:50% size:60% +Format, and the benefits +of that are fairly wide. + +00:07:59.966 --> 00:08:04.233 align:middle line:79% position:50% size:58% +So, like I mentioned, +we've been working with + +00:08:04.300 --> 00:08:06.366 align:middle line:79% position:50% size:45% +this format for +several years now. 
+ +00:08:06.433 --> 00:08:09.199 align:middle line:79% position:50% size:65% +That is really the goal +of this project, is to use + +00:08:09.266 --> 00:08:12.033 align:middle line:79% position:50% size:50% +the rules format and +develop these rules. + +00:08:12.100 --> 00:08:16.300 align:middle line:79% position:50% size:80% +And it has really improved +the quality of the organizations + +00:08:16.366 --> 00:08:21.266 align:middle line:79% position:50% size:75% +that have adopted this, +like in noticeable ways things + +00:08:21.333 --> 00:08:24.500 align:middle line:79% position:50% size:40% +have gotten +more consistent. + +00:08:24.566 --> 00:08:27.433 align:middle line:79% position:50% size:63% +That has been between the +project partners but also + +00:08:27.500 --> 00:08:31.600 align:middle line:79% position:50% size:65% +organizations outside of +the WAI-Tools Project that + +00:08:31.666 --> 00:08:35.766 align:middle line:79% position:50% size:65% +have been using the ACT +Rules to make their tools, + +00:08:35.833 --> 00:08:38.533 align:middle line:79% position:50% size:75% +to make their testing +methodologies more consistent. + +00:08:38.600 --> 00:08:41.733 align:middle line:79% position:50% size:68% +So, we are seeing +concrete evidence that this + +00:08:41.799 --> 00:08:43.533 align:middle line:79% position:50% size:35% +approach seems +to be working. + +00:08:46.566 --> 00:08:52.666 align:middle line:79% position:50% size:58% +By writing these rules, +we then have a more + +00:08:52.733 --> 00:08:57.766 align:middle line:79% position:50% size:58% +authoritative set of +things that we know are + +00:08:57.833 --> 00:09:02.966 align:middle line:79% position:50% size:65% +conforming to WCAG or are +not conforming to WCAG and + +00:09:03.033 --> 00:09:06.399 align:middle line:79% position:50% size:63% +what we've ended up doing +is to share these rules + +00:09:06.466 --> 00:09:09.500 align:middle line:79% position:50% size:53% +with the W3C and to +have those published. + +00:09:09.566 --> 00:09:12.766 align:middle line:79% position:50% size:68% +There are currently eleven +of the rules that have been + +00:09:12.833 --> 00:09:15.399 align:middle line:79% position:50% size:65% +developed, that have +been published by the W3C. + +00:09:15.466 --> 00:09:19.100 align:middle line:79% position:50% size:58% +So, these are really +validated by the people + +00:09:19.166 --> 00:09:21.566 align:middle line:85% position:50% size:60% +that wrote the standard. + +00:09:21.633 --> 00:09:24.233 align:middle line:79% position:50% size:60% +So, this gives us a lot +more credibility as well + +00:09:24.299 --> 00:09:25.466 align:middle line:79% position:50% size:40% +for the work +that we've done. + +00:09:25.533 --> 00:09:29.433 align:middle line:79% position:50% size:65% +This is the way you're +supposed to interpret WCAG + +00:09:29.500 --> 00:09:35.866 align:middle line:79% position:50% size:53% +for the HTML standard +or for SVG documents. + +00:09:35.933 --> 00:09:38.966 align:middle line:79% position:50% size:53% +And by doing this, we +resolve a lot of open + +00:09:39.033 --> 00:09:41.100 align:middle line:85% position:50% size:48% +questions for WCAG. 
+ +00:09:41.166 --> 00:09:49.399 align:middle line:79% position:50% size:65% +One example of such a +question is if you have an + +00:09:49.466 --> 00:09:53.899 align:middle line:79% position:50% size:63% +image with the text right +next to it and the image + +00:09:53.966 --> 00:09:58.033 align:middle line:79% position:50% size:65% +basically says the same as +the text, does that image + +00:09:58.100 --> 00:10:01.866 align:middle line:79% position:50% size:48% +need to have a text +alternative or not? + +00:10:01.933 --> 00:10:05.133 align:middle line:79% position:50% size:43% +It's kind of a +debated question. + +00:10:05.200 --> 00:10:08.200 align:middle line:79% position:50% size:53% +Some accessibility +experts say yes, some + +00:10:08.266 --> 00:10:10.433 align:middle line:79% position:50% size:63% +accessibility experts say +no, you absolutely should + +00:10:10.500 --> 00:10:14.866 align:middle line:79% position:50% size:68% +not do that, and the rules +answer this question for us + +00:10:14.933 --> 00:10:18.766 align:middle line:79% position:50% size:55% +definitively so we do +not have to have these + +00:10:18.833 --> 00:10:21.200 align:middle line:85% position:50% size:55% +conversations anymore. + +00:10:21.266 --> 00:10:25.733 align:middle line:79% position:50% size:50% +It clears things +up in a lot of ways. + +00:10:25.799 --> 00:10:31.200 align:middle line:79% position:50% size:65% +And by clearing this up, +we enable organizations to + +00:10:31.266 --> 00:10:36.666 align:middle line:79% position:50% size:70% +test more consistently, +which - allows accessibility + +00:10:36.733 --> 00:10:39.066 align:middle line:79% position:50% size:50% +results to be +more widely trusted. + +00:10:39.133 --> 00:10:43.466 align:middle line:79% position:50% size:55% +You can trust that one +organization using ACT + +00:10:43.533 --> 00:10:49.833 align:middle line:79% position:50% size:70% +Rules will produce almost +identical results to another + +00:10:49.899 --> 00:10:53.200 align:middle line:79% position:50% size:63% +organization using +those same sets of rules. + +00:10:53.266 --> 00:10:55.299 align:middle line:79% position:50% size:63% +There can still be +small differences but the + +00:10:55.366 --> 00:10:58.700 align:middle line:79% position:50% size:68% +differences are far smaller +than what they used to be. + +00:10:58.766 --> 00:11:01.166 align:middle line:85% position:50% size:45% +Next slide please. + +00:11:07.100 --> 00:11:09.733 align:middle line:79% position:50% size:68% +So, just to get into +some of the things that you + +00:11:09.799 --> 00:11:12.000 align:middle line:85% position:50% size:48% +get from ACT Rules. + +00:11:12.066 --> 00:11:16.100 align:middle line:79% position:50% size:63% +I think I might have been +going over time already, + +00:11:16.166 --> 00:11:17.600 align:middle line:85% position:50% size:60% +so I'll keep that quick. + +00:11:17.666 --> 00:11:23.266 align:middle line:79% position:50% size:65% +So, ACT Rules use +unambiguous language which + +00:11:23.333 --> 00:11:25.866 align:middle line:79% position:50% size:58% +is great because it +helps you answer things + +00:11:25.933 --> 00:11:28.266 align:middle line:85% position:50% size:55% +in a very precise way. 
+ +00:11:28.333 --> 00:11:33.066 align:middle line:79% position:50% size:60% +If you need to know what +something means, how to + +00:11:33.133 --> 00:11:36.033 align:middle line:79% position:50% size:53% +interpret a thing, +the ACT Rules offer a + +00:11:36.100 --> 00:11:39.033 align:middle line:85% position:50% size:63% +very precise description. + +00:11:39.100 --> 00:11:45.166 align:middle line:79% position:50% size:78% +So there's no more, well, it +depends kind of thing going on. + +00:11:45.233 --> 00:11:48.033 align:middle line:79% position:50% size:60% +ACT Rules are very clear +on when something should + +00:11:48.100 --> 00:11:50.066 align:middle line:79% position:50% size:55% +pass and when +something should fail. + +00:11:50.133 --> 00:11:57.799 align:middle line:79% position:50% size:58% +ACT Rules also document +assistive technologies. + +00:11:57.866 --> 00:12:03.966 align:middle line:79% position:50% size:65% +If something doesn't work +in some screen readers but + +00:12:04.033 --> 00:12:06.500 align:middle line:79% position:50% size:63% +it does in others, you +can find that information + +00:12:06.566 --> 00:12:10.100 align:middle line:79% position:50% size:55% +in the rules and this +helps you frame how to + +00:12:10.166 --> 00:12:12.299 align:middle line:85% position:50% size:58% +understand the results. + +00:12:12.366 --> 00:12:15.533 align:middle line:79% position:50% size:63% +So, in some cases you may +still see an issue if you + +00:12:15.600 --> 00:12:18.733 align:middle line:79% position:50% size:60% +try something with a +particular screen reader + +00:12:18.799 --> 00:12:22.066 align:middle line:79% position:50% size:45% +that you don't +see in the report. + +00:12:22.133 --> 00:12:24.866 align:middle line:79% position:50% size:58% +That really helps +you understand what the + +00:12:24.933 --> 00:12:26.100 align:middle line:85% position:50% size:55% +results actually mean. + +00:12:26.166 --> 00:12:30.966 align:middle line:79% position:50% size:53% +Then they +document assumptions. + +00:12:31.033 --> 00:12:37.600 align:middle line:79% position:50% size:63% +So, if there are very +unlikely edge cases or if + +00:12:37.666 --> 00:12:40.500 align:middle line:79% position:50% size:65% +there's a particular +interpretation that may be + +00:12:40.566 --> 00:12:47.166 align:middle line:79% position:50% size:65% +controversial or that is +unusual, then that will be + +00:12:47.233 --> 00:12:49.766 align:middle line:79% position:50% size:58% +documented in the +rule itself so you will + +00:12:49.833 --> 00:12:54.399 align:middle line:79% position:50% size:60% +understand why is the +result the way it is and + +00:12:54.466 --> 00:12:59.399 align:middle line:79% position:50% size:60% +that really helps you +frame the results again. + +00:12:59.466 --> 00:13:04.200 align:middle line:79% position:50% size:58% +And then lastly but not +least important either, + +00:13:04.266 --> 00:13:08.166 align:middle line:79% position:50% size:60% +diagnostic for +tools and methodologies. + +00:13:08.233 --> 00:13:12.733 align:middle line:79% position:50% size:60% +We want ACT Rules to +be something that can be + +00:13:12.799 --> 00:13:17.233 align:middle line:79% position:50% size:60% +implemented irrespective +of whether or not you're + +00:13:17.299 --> 00:13:20.433 align:middle line:79% position:50% size:53% +doing it manually or +if you're automating. + +00:13:20.500 --> 00:13:23.899 align:middle line:79% position:50% size:43% +That really +shouldn't matter. 
+
+00:13:23.966 --> 00:13:26.399 align:middle line:79% position:50% size:63%
+It shouldn't matter if
+you're using a particular
+
+00:13:26.466 --> 00:13:31.799 align:middle line:79% position:50% size:78%
+technology stack in your tool
+or some other technology stack.
+
+00:13:31.866 --> 00:13:34.933 align:middle line:79% position:50% size:58%
+These are written in
+a way that everybody is
+
+00:13:35.000 --> 00:13:36.799 align:middle line:85% position:50% size:70%
+doing accessibility testing.
+
+00:13:36.866 --> 00:13:39.266 align:middle line:79% position:50% size:58%
+Whether it be fully
+automated or completely
+
+00:13:39.333 --> 00:13:42.233 align:middle line:79% position:50% size:55%
+manual or somewhere in
+between, you should be
+
+00:13:42.299 --> 00:13:44.200 align:middle line:85% position:50% size:60%
+able to implement these.
+
+00:13:44.266 --> 00:13:50.166 align:middle line:79% position:50% size:60%
+I think with that, I am
+passing it on to Kasper.
+
+00:13:50.233 --> 00:13:53.266 align:middle line:79% position:50% size:43%
+>> KASPER ISAGER:
+Alrighty.
+
+00:13:53.333 --> 00:13:57.000 align:middle line:79% position:50% size:63%
+So, I will just briefly
+talk about the rules that
+
+00:13:57.066 --> 00:14:00.233 align:middle line:79% position:50% size:63%
+we developed as part of
+the project and then also
+
+00:14:00.299 --> 00:14:04.366 align:middle line:79% position:50% size:65%
+briefly touch on how these
+were actually developed.
+
+00:14:04.433 --> 00:14:09.266 align:middle line:79% position:50% size:63%
+So, just for an overview,
+we initially set out to
+
+00:14:09.333 --> 00:14:13.366 align:middle line:79% position:50% size:65%
+develop a total of seventy
+of these ACT Rules, of
+
+00:14:13.433 --> 00:14:16.500 align:middle line:79% position:50% size:60%
+which we wanted thirty
+to be fully automatable,
+
+00:14:16.566 --> 00:14:18.866 align:middle line:79% position:50% size:60%
+twenty-five of them were
+to be semi-automatable,
+
+00:14:18.933 --> 00:14:23.799 align:middle line:79% position:50% size:60%
+and fifteen were to be
+completely manual rules.
+
+00:14:23.866 --> 00:14:27.399 align:middle line:79% position:50% size:58%
+And we started out by
+trying to cover as much
+
+00:14:27.466 --> 00:14:31.133 align:middle line:79% position:50% size:70%
+ground as possible in
+terms of the type of content
+
+00:14:31.200 --> 00:14:32.600 align:middle line:85% position:50% size:65%
+that the rules dealt with.
+
+00:14:32.666 --> 00:14:35.799 align:middle line:79% position:50% size:68%
+So, we were looking at
+the text content, graphics,
+
+00:14:35.866 --> 00:14:40.000 align:middle line:79% position:50% size:63%
+audio/visual media, all
+the stuff that was really
+
+00:14:40.066 --> 00:14:44.233 align:middle line:79% position:50% size:65%
+based on, in many cases,
+the existing rulesets that
+
+00:14:44.299 --> 00:14:47.000 align:middle line:79% position:50% size:55%
+the individual project
+partners already had
+
+00:14:47.066 --> 00:14:49.633 align:middle line:79% position:50% size:58%
+implemented in
+their individual tools.
+
+00:14:49.700 --> 00:14:53.100 align:middle line:79% position:50% size:75%
+And then later on as the
+project progressed, we started
+
+00:14:53.166 --> 00:14:55.333 align:middle line:79% position:50% size:63%
+seeing more and more
+specialized rules focused
+
+00:14:55.399 --> 00:14:58.500 align:middle line:79% position:50% size:63%
+on increasingly narrow
+aspects of their content.
+
+00:14:58.566 --> 00:15:01.966 align:middle line:79% position:50% size:60%
+We started looking at
+stuff like orientation
+
+00:15:02.033 --> 00:15:09.100 align:middle line:79% position:50% size:70%
+locks implemented using
+CSS and also things like CSS
+
+00:15:09.166 --> 00:15:13.100 align:middle line:79% position:50% size:70%
+declarations that couldn't
+be overwritten by the users.
+
+00:15:13.166 --> 00:15:16.500 align:middle line:79% position:50% size:63%
+So, much more specialized
+failure cases than the
+
+00:15:16.566 --> 00:15:19.233 align:middle line:79% position:50% size:45%
+stuff that we
+initially covered.
+
+00:15:19.299 --> 00:15:21.733 align:middle line:79% position:50% size:60%
+We also ended up
+developing several rules
+
+00:15:21.799 --> 00:15:26.166 align:middle line:79% position:50% size:60%
+that didn't relate
+directly to the WCAG but
+
+00:15:26.233 --> 00:15:29.200 align:middle line:79% position:50% size:63%
+we are looking at things
+like WAI-ARIA conformance
+
+00:15:29.266 --> 00:15:32.033 align:middle line:79% position:50% size:60%
+and also its associated
+authoring practices that
+
+00:15:32.100 --> 00:15:36.833 align:middle line:79% position:50% size:53%
+define best practices
+for use of WAI-ARIA.
+
+00:15:36.899 --> 00:15:39.233 align:middle line:79% position:50% size:60%
+And with that, I
+will cue the next slide.
+
+00:15:43.466 --> 00:15:44.666 align:middle line:85% position:50% size:25%
+Thank you.
+
+00:15:44.733 --> 00:15:47.066 align:middle line:79% position:50% size:65%
+So, as already mentioned,
+these rules were developed
+
+00:15:47.133 --> 00:15:51.633 align:middle line:79% position:50% size:60%
+in an open W3C Community
+Group, and within that
+
+00:15:51.700 --> 00:15:53.933 align:middle line:79% position:50% size:60%
+group the first thing we
+of course had to decide,
+
+00:15:54.000 --> 00:15:56.899 align:middle line:79% position:50% size:65%
+also among the project
+partners, were which rules
+
+00:15:56.966 --> 00:15:59.433 align:middle line:79% position:50% size:45%
+we actually
+wanted to develop.
+
+00:15:59.500 --> 00:16:02.233 align:middle line:79% position:50% size:50%
+And for this, we had
+a list of a defined
+
+00:16:02.299 --> 00:16:05.666 align:middle line:79% position:50% size:65%
+selection criteria so we
+would look at things like,
+
+00:16:05.733 --> 00:16:10.500 align:middle line:79% position:50% size:58%
+so, a proposed rule for
+example or how many new
+
+00:16:10.566 --> 00:16:12.566 align:middle line:79% position:50% size:55%
+additional success
+criteria from the WCAG
+
+00:16:12.633 --> 00:16:15.633 align:middle line:79% position:50% size:73%
+does it cover that we haven't
+already covered, and what is
+
+00:16:15.700 --> 00:16:19.600 align:middle line:79% position:50% size:73%
+the, sort of the options both
+across the project partners
+
+00:16:19.666 --> 00:16:23.333 align:middle line:79% position:50% size:65%
+and how easy would it be
+to implement in our tools,
+
+00:16:23.399 --> 00:16:26.766 align:middle line:79% position:50% size:70%
+and also how likely was
+it that this was a rule that
+
+00:16:26.833 --> 00:16:29.933 align:middle line:79% position:50% size:50%
+would be picked
+up by the community.
+ +00:16:30.000 --> 00:16:33.166 align:middle line:79% position:50% size:65% +And then once we had +selected rules to develop, + +00:16:33.233 --> 00:16:36.966 align:middle line:79% position:50% size:63% +we would incubate them in +this open community group + +00:16:37.033 --> 00:16:39.866 align:middle line:79% position:50% size:63% +with input both from the +project partners but also + +00:16:39.933 --> 00:16:40.833 align:middle line:85% position:50% size:50% +the wider community. + +00:16:40.899 --> 00:16:42.833 align:middle line:79% position:50% size:60% +And this is really when +the bulk of the work was + +00:16:42.899 --> 00:16:46.566 align:middle line:79% position:50% size:60% +spent, with the biggest +and most important point + +00:16:46.633 --> 00:16:49.566 align:middle line:79% position:50% size:58% +probably being +reaching a consensus on + +00:16:49.633 --> 00:16:53.566 align:middle line:79% position:50% size:65% +interpretation of whatever +success criteria it was + +00:16:53.633 --> 00:16:56.766 align:middle line:79% position:50% size:63% +that the rule is +supposed to test and also + +00:16:56.833 --> 00:16:59.533 align:middle line:79% position:50% size:65% +researching, like, the +real world implications of + +00:16:59.600 --> 00:17:03.266 align:middle line:79% position:50% size:63% +what the rule would look +at, figuring out so under + +00:17:03.333 --> 00:17:06.299 align:middle line:79% position:50% size:65% +what assumptions would the +thing that the rule looked + +00:17:06.366 --> 00:17:09.266 align:middle line:85% position:50% size:63% +at actually be a problem. + +00:17:09.333 --> 00:17:11.200 align:middle line:79% position:50% size:60% +And in case those +assumptions didn't hold, + +00:17:11.266 --> 00:17:15.000 align:middle line:79% position:50% size:63% +then would there actually +be an actual problem? + +00:17:15.066 --> 00:17:19.833 align:middle line:79% position:50% size:58% +So, really finding out +where, you know, theory + +00:17:19.900 --> 00:17:23.200 align:middle line:79% position:50% size:48% +and reality aligned +and agreeing on an + +00:17:23.266 --> 00:17:25.233 align:middle line:79% position:50% size:65% +interpretation that will +then form the basis of the + +00:17:25.299 --> 00:17:27.033 align:middle line:85% position:50% size:60% +rules that we developed. + +00:17:27.099 --> 00:17:30.366 align:middle line:79% position:50% size:65% +And once we had the +rules developed, they were + +00:17:30.433 --> 00:17:32.833 align:middle line:79% position:50% size:65% +implemented by the project +partners in our various + +00:17:32.900 --> 00:17:36.599 align:middle line:79% position:50% size:65% +tools and also validated +by another project partner + +00:17:36.666 --> 00:17:39.566 align:middle line:79% position:50% size:55% +on this fixed set of +real web pages that we + +00:17:39.633 --> 00:17:42.666 align:middle line:85% position:50% size:60% +had selected beforehand. + +00:17:42.733 --> 00:17:47.333 align:middle line:79% position:50% size:63% +And then once implemented +and validated and matured + +00:17:47.400 --> 00:17:50.799 align:middle line:79% position:50% size:65% +in the community, we would +ship them off to the ACT + +00:17:50.866 --> 00:17:56.000 align:middle line:79% position:50% size:60% +Task Force to eventually +be included as official + +00:17:56.066 --> 00:17:58.700 align:middle line:79% position:50% size:48% +live resources +on the WAI webpage. 
+ +00:18:02.400 --> 00:18:04.933 align:middle line:79% position:50% size:53% +And with that, I will +pass it on to Carlos. + +00:18:08.533 --> 00:18:10.066 align:middle line:79% position:50% size:45% +>> CARLOS DUARTE: +Thank you, Kasper. + +00:18:10.133 --> 00:18:15.966 align:middle line:79% position:50% size:58% +I'm going to start by +talking a bit about how + +00:18:16.033 --> 00:18:18.233 align:middle line:79% position:50% size:43% +does this +impact you, okay. + +00:18:18.299 --> 00:18:23.066 align:middle line:79% position:50% size:68% +So, what does it mean +for you, for the community, + +00:18:23.133 --> 00:18:27.866 align:middle line:79% position:50% size:60% +that we have +developed seventy rules? + +00:18:27.933 --> 00:18:33.599 align:middle line:79% position:50% size:65% +So, first it means that we +now have more than seventy + +00:18:33.666 --> 00:18:38.200 align:middle line:79% position:50% size:73% +different aspects about WCAG +that at least three different + +00:18:38.266 --> 00:18:40.766 align:middle line:79% position:50% size:68% +organizations agree on +how they need to be tested. + +00:18:40.833 --> 00:18:44.599 align:middle line:79% position:50% size:63% +Okay, so I think this is +already very significant. + +00:18:44.666 --> 00:18:47.400 align:middle line:79% position:50% size:53% +And when I say at +least three different + +00:18:47.466 --> 00:18:51.333 align:middle line:79% position:50% size:55% +organizations, I'm not +saying three different + +00:18:51.400 --> 00:18:53.799 align:middle line:79% position:50% size:55% +project partners, and +this is something that + +00:18:53.866 --> 00:18:55.633 align:middle line:85% position:50% size:63% +Kasper just talked about. + +00:18:55.700 --> 00:18:59.033 align:middle line:79% position:50% size:65% +So, because the main work +we've done in this project + +00:18:59.099 --> 00:19:02.166 align:middle line:79% position:50% size:60% +was done in the scope of +the ACT Community Group, + +00:19:02.233 --> 00:19:05.700 align:middle line:79% position:50% size:65% +which means that it was +an open group and its work + +00:19:05.766 --> 00:19:07.833 align:middle line:85% position:50% size:58% +was publicly available. + +00:19:07.900 --> 00:19:10.700 align:middle line:79% position:50% size:45% +So, we are not +just three project + +00:19:10.766 --> 00:19:12.299 align:middle line:85% position:50% size:70% +partners reaching consensus. + +00:19:12.366 --> 00:19:15.233 align:middle line:85% position:50% size:53% +It's wider than that. + +00:19:15.299 --> 00:19:19.133 align:middle line:79% position:50% size:65% +And there were involvement +at different levels from + +00:19:19.200 --> 00:19:21.133 align:middle line:79% position:50% size:60% +these organizations +outside the project that + +00:19:21.200 --> 00:19:24.566 align:middle line:79% position:50% size:60% +fortunately was not +just at the rule writing + +00:19:24.633 --> 00:19:25.733 align:middle line:85% position:50% size:50% +and reviewing level. + +00:19:25.799 --> 00:19:30.200 align:middle line:79% position:50% size:63% +So, not surprisingly, the +project partners included + +00:19:30.266 --> 00:19:34.833 align:middle line:79% position:50% size:75% +in their products, the ACT +Rules that we were developing. + +00:19:34.900 --> 00:19:38.866 align:middle line:79% position:50% size:68% +But I think we can say that +we are really happy to see + +00:19:38.933 --> 00:19:41.500 align:middle line:79% position:50% size:60% +that other organizations +started to do the same. 
+ +00:19:41.566 --> 00:19:45.633 align:middle line:79% position:50% size:63% +So, the other two vendors +have now products that + +00:19:45.700 --> 00:19:49.833 align:middle line:79% position:50% size:60% +include conformance +testing using ACT Rules, + +00:19:49.900 --> 00:19:54.333 align:middle line:79% position:50% size:58% +manual testing +methodologies that also + +00:19:54.400 --> 00:19:57.166 align:middle line:85% position:50% size:45% +include ACT Rules. + +00:19:57.233 --> 00:20:01.366 align:middle line:79% position:50% size:63% +So, from this you can +understand that ACT Rules + +00:20:01.433 --> 00:20:05.733 align:middle line:79% position:50% size:63% +already have an impact on +several tools, on several + +00:20:05.799 --> 00:20:09.833 align:middle line:79% position:50% size:65% +methodologies, and let me +stress this out again, not + +00:20:09.900 --> 00:20:12.700 align:middle line:79% position:50% size:55% +only from the tools +and methodologies made + +00:20:12.766 --> 00:20:16.733 align:middle line:79% position:50% size:43% +available by the +project partners. + +00:20:16.799 --> 00:20:20.400 align:middle line:79% position:50% size:65% +So, if you use one of +these tools or if you have + +00:20:20.466 --> 00:20:23.466 align:middle line:79% position:50% size:53% +your projects checked +with one of those + +00:20:23.533 --> 00:20:27.400 align:middle line:79% position:50% size:55% +methodologies, then +ACT Rules have already + +00:20:27.466 --> 00:20:32.700 align:middle line:79% position:50% size:60% +impacted you, and we can +also say - you will be + +00:20:32.766 --> 00:20:36.866 align:middle line:79% position:50% size:65% +able to hear about this +in the next presentation - + +00:20:36.933 --> 00:20:40.799 align:middle line:79% position:50% size:73% +that these tools and +these methodologies are being + +00:20:40.866 --> 00:20:43.599 align:middle line:79% position:50% size:53% +used by monitoring +bodies across Europe. + +00:20:43.666 --> 00:20:47.400 align:middle line:79% position:50% size:60% +So, we are already +witnessing the impact of + +00:20:47.466 --> 00:20:51.533 align:middle line:79% position:50% size:35% +ACT Rules at +a large scale. + +00:20:51.599 --> 00:20:54.299 align:middle line:79% position:50% size:55% +And we believe that +ACT Rules will play an + +00:20:54.366 --> 00:20:57.033 align:middle line:79% position:50% size:55% +important role for the +European monitoring + +00:20:57.099 --> 00:20:59.599 align:middle line:85% position:50% size:55% +efforts as Wilco said. + +00:20:59.666 --> 00:21:03.099 align:middle line:79% position:50% size:63% +So, given the consistency +that the ACT Rules offer, + +00:21:03.166 --> 00:21:06.400 align:middle line:79% position:50% size:58% +even when monitoring +agencies from different + +00:21:06.466 --> 00:21:09.833 align:middle line:79% position:50% size:58% +countries use different +tools, if those tools + +00:21:09.900 --> 00:21:13.533 align:middle line:79% position:50% size:63% +implement ACT Rules, +then their results can be + +00:21:13.599 --> 00:21:16.933 align:middle line:79% position:50% size:55% +compared with a higher +degree of confidence. + +00:21:17.000 --> 00:21:21.366 align:middle line:79% position:50% size:75% +We think this is already a +great start but in the future, + +00:21:21.433 --> 00:21:24.866 align:middle line:79% position:50% size:68% +we can only hope that the +impact will be even bigger. 
+ +00:21:24.933 --> 00:21:28.366 align:middle line:79% position:50% size:60% +So, as also Wilco +mentioned, eleven of the + +00:21:28.433 --> 00:21:32.500 align:middle line:79% position:50% size:65% +ACT Rules developed by the +project and adopted by the + +00:21:32.566 --> 00:21:35.833 align:middle line:79% position:50% size:65% +community have already +been published by the W3C. + +00:21:35.900 --> 00:21:39.633 align:middle line:79% position:50% size:63% +There are more in the +pipeline that should soon + +00:21:39.700 --> 00:21:44.700 align:middle line:79% position:50% size:65% +increase this number and I +think that the interest in + +00:21:44.766 --> 00:21:47.700 align:middle line:79% position:50% size:65% +ACT Rules is only bound +to grow when the community + +00:21:47.766 --> 00:21:51.733 align:middle line:79% position:50% size:50% +starts finding +ACT Rules elsewhere. + +00:21:51.799 --> 00:21:54.933 align:middle line:79% position:50% size:58% +Shadi, can you move to +the next slide, please? + +00:21:58.000 --> 00:21:58.933 align:middle line:85% position:50% size:18% +Thanks. + +00:21:59.000 --> 00:22:03.866 align:middle line:79% position:50% size:65% +So, you have a screenshot, +so a sneak peek at the + +00:22:03.933 --> 00:22:12.500 align:middle line:79% position:50% size:73% +Understanding document for +WCAG 2.2 soon to be released. + +00:22:12.566 --> 00:22:17.500 align:middle line:79% position:50% size:65% +And you can find there - +yes, thanks, Shadi - a new + +00:22:17.566 --> 00:22:21.400 align:middle line:79% position:50% size:65% +section on Understanding +ACT Rules and we have this + +00:22:21.466 --> 00:22:24.166 align:middle line:79% position:50% size:65% +section at the same level +that we have currently for + +00:22:24.233 --> 00:22:28.099 align:middle line:79% position:50% size:70% +the Understanding Techniques +for WCAG's success criteria. + +00:22:28.166 --> 00:22:31.133 align:middle line:79% position:50% size:65% +So, this is really +exciting and we are really + +00:22:31.200 --> 00:22:36.133 align:middle line:79% position:50% size:55% +seeing ACT Rules being +adopted and promoted. + +00:22:36.200 --> 00:22:40.933 align:middle line:79% position:50% size:63% +Okay, so now I want to +talk to you about how you + +00:22:41.000 --> 00:22:44.866 align:middle line:79% position:50% size:60% +can contribute to +keep ACT Rules going on. + +00:22:44.933 --> 00:22:48.500 align:middle line:79% position:50% size:65% +So, we like to believe +that this was a successful + +00:22:48.566 --> 00:22:53.166 align:middle line:79% position:50% size:65% +project and that we really +kickstarted the work on + +00:22:53.233 --> 00:22:55.433 align:middle line:79% position:50% size:63% +ACT Rules but now it's up +to you, to the community + +00:22:55.500 --> 00:22:57.500 align:middle line:79% position:50% size:38% +to keep this +momentum going. + +00:22:57.566 --> 00:23:01.233 align:middle line:79% position:50% size:55% +And fortunately, there +are many ways in which + +00:23:01.299 --> 00:23:02.200 align:middle line:85% position:50% size:48% +you can contribute. 
+ +00:23:02.266 --> 00:23:05.866 align:middle line:79% position:50% size:60% +For instance, if you are +a tool or a methodology + +00:23:05.933 --> 00:23:09.000 align:middle line:79% position:50% size:65% +developer, you can +start by checking how well + +00:23:09.066 --> 00:23:12.966 align:middle line:79% position:50% size:60% +aligned your project is +to the current consensus + +00:23:13.033 --> 00:23:15.066 align:middle line:85% position:50% size:58% +expressed in ACT Rules. + +00:23:15.133 --> 00:23:17.599 align:middle line:79% position:50% size:63% +So you can read the rules +that are related to the + +00:23:17.666 --> 00:23:19.366 align:middle line:85% position:50% size:60% +checks that you support. + +00:23:19.433 --> 00:23:24.200 align:middle line:79% position:50% size:70% +You can see how well you do +on the published test cases. + +00:23:24.266 --> 00:23:27.966 align:middle line:79% position:50% size:60% +And you don't agree with +some aspect of the rule? + +00:23:28.033 --> 00:23:29.000 align:middle line:85% position:50% size:15% +Great. + +00:23:29.066 --> 00:23:32.466 align:middle line:79% position:50% size:70% +We did try our best but +we know we are not flawless. + +00:23:32.533 --> 00:23:35.266 align:middle line:79% position:50% size:60% +So, raise an issue +reporting what you found + +00:23:35.333 --> 00:23:39.233 align:middle line:79% position:50% size:60% +and it will be discussed +in the community and the + +00:23:39.299 --> 00:23:41.200 align:middle line:79% position:50% size:45% +rule will be +updated if needed. + +00:23:41.266 --> 00:23:44.900 align:middle line:79% position:50% size:38% +It wouldn't be +the first time. + +00:23:44.966 --> 00:23:47.566 align:middle line:79% position:50% size:50% +While you are at it, +you can send us the + +00:23:47.633 --> 00:23:50.766 align:middle line:79% position:50% size:73% +implementation reports of +your tools and methodologies. + +00:23:50.833 --> 00:23:53.966 align:middle line:79% position:50% size:63% +In the ACT Rules website, +we have a section for + +00:23:54.033 --> 00:23:56.366 align:middle line:79% position:50% size:53% +implementations where +all reports that we + +00:23:56.433 --> 00:23:58.299 align:middle line:85% position:50% size:55% +receive are displayed. + +00:23:58.366 --> 00:24:01.366 align:middle line:79% position:50% size:63% +So, this is a great way +to let the community know + +00:24:01.433 --> 00:24:04.633 align:middle line:79% position:50% size:58% +that your products are +supported by ACT Rules. + +00:24:04.700 --> 00:24:08.766 align:middle line:79% position:50% size:60% +And irrespectively of +your developing tools or + +00:24:08.833 --> 00:24:12.933 align:middle line:79% position:50% size:70% +methodologies or being a +user of such or a monitoring + +00:24:13.000 --> 00:24:16.299 align:middle line:79% position:50% size:55% +body or someone +that's responsible for + +00:24:16.366 --> 00:24:19.599 align:middle line:79% position:50% size:65% +publishing accessible +information on the web, be + +00:24:19.666 --> 00:24:23.033 align:middle line:79% position:50% size:60% +happy because there are +other ways you can help. + +00:24:23.099 --> 00:24:25.766 align:middle line:79% position:50% size:78% +We are always looking for more +people to help reviewing rules. 
+ +00:24:25.833 --> 00:24:28.233 align:middle line:79% position:50% size:55% +So, it's great to have +people with different + +00:24:28.299 --> 00:24:31.833 align:middle line:79% position:50% size:68% +experience and perspectives +contributing to this work. + +00:24:31.900 --> 00:24:34.700 align:middle line:79% position:50% size:63% +If you have an idea for +a rule, you can write it. + +00:24:34.766 --> 00:24:37.400 align:middle line:79% position:50% size:50% +Don't worry if +you feel overwhelmed + +00:24:37.466 --> 00:24:38.866 align:middle line:85% position:50% size:53% +about writing a rule. + +00:24:38.933 --> 00:24:41.200 align:middle line:79% position:50% size:53% +You can team up with +others and do it as a + +00:24:41.266 --> 00:24:43.000 align:middle line:85% position:50% size:55% +collaborative project. + +00:24:43.066 --> 00:24:45.700 align:middle line:79% position:50% size:63% +That's what we are all +doing here, collaborating + +00:24:45.766 --> 00:24:48.166 align:middle line:79% position:50% size:40% +to make the web +more accessible. + +00:24:48.233 --> 00:24:52.400 align:middle line:79% position:50% size:65% +And to do any of this, +you don't have to be a W3C + +00:24:52.466 --> 00:24:56.466 align:middle line:79% position:50% size:63% +member, but if you are or +want to become one, then + +00:24:56.533 --> 00:24:58.766 align:middle line:79% position:50% size:63% +you can actively +contribute to support the + +00:24:58.833 --> 00:25:03.000 align:middle line:79% position:50% size:73% +adoption of more rules +into WCAG 2.2 and potentially + +00:25:03.066 --> 00:25:07.466 align:middle line:85% position:50% size:38% +the new WCAG 3. + +00:25:07.533 --> 00:25:11.366 align:middle line:79% position:50% size:70% +So, Shadi, I think we +can move to the final slide. + +00:25:11.433 --> 00:25:18.200 align:middle line:79% position:50% size:58% +And you can find more +details about ACT Rules + +00:25:18.266 --> 00:25:20.466 align:middle line:79% position:50% size:48% +and how you can +join this community + +00:25:20.533 --> 00:25:28.533 align:middle line:85% position:50% size:58% +at act-rules.github.io. + +00:25:28.599 --> 00:25:32.500 align:middle line:79% position:50% size:65% +And thanks for listening +and I think Shadi, now you + +00:25:32.566 --> 00:25:35.066 align:middle line:79% position:50% size:43% +want to take some +questions, right? + +00:25:35.133 --> 00:25:37.633 align:middle line:79% position:50% size:63% +>> SHADI ABOU-ZAHRA: Yes, +questions are coming in. + +00:25:37.700 --> 00:25:40.233 align:middle line:79% position:50% size:65% +Thank you, everybody who +is contributing questions. + +00:25:40.299 --> 00:25:44.066 align:middle line:79% position:50% size:80% +If you have questions, now +is the time in the Q&A function. + +00:25:44.133 --> 00:25:46.266 align:middle line:79% position:50% size:60% +Before we get to that, +just to get a little bit + +00:25:46.333 --> 00:25:53.700 align:middle line:79% position:50% size:65% +of a feel, how many people +are subscribed to the + +00:25:53.766 --> 00:25:56.766 align:middle line:79% position:50% size:50% +community group and +roughly how many are + +00:25:56.833 --> 00:26:02.466 align:middle line:79% position:50% size:63% +active in the calls and, +you know, the active part + +00:26:02.533 --> 00:26:05.400 align:middle line:79% position:50% size:40% +of the work, not +just subscribed? + +00:26:05.466 --> 00:26:08.133 align:middle line:79% position:50% size:48% +Who wants to +take that question? 
+ +00:26:08.200 --> 00:26:09.900 align:middle line:79% position:50% size:38% +>> WILCO FIERS: +I'll take it. + +00:26:09.966 --> 00:26:13.900 align:middle line:79% position:50% size:58% +It depends a little bit +because there are two + +00:26:13.966 --> 00:26:19.133 align:middle line:79% position:50% size:68% +groups, one in W3C itself +and a Community Group which + +00:26:19.200 --> 00:26:24.000 align:middle line:79% position:50% size:70% +has no official standing at +the W3C but is hosted by it. + +00:26:24.066 --> 00:26:28.733 align:middle line:79% position:50% size:60% +The Community Group has, +I believe, around ninety + +00:26:28.799 --> 00:26:31.266 align:middle line:85% position:50% size:45% +members right now. + +00:26:31.333 --> 00:26:35.266 align:middle line:79% position:50% size:60% +As for active +participants, again that + +00:26:35.333 --> 00:26:37.566 align:middle line:79% position:50% size:58% +depends on how +active you mean active. + +00:26:37.633 --> 00:26:42.233 align:middle line:79% position:50% size:63% +I think right now we have +ten to twelve people that + +00:26:42.299 --> 00:26:49.166 align:middle line:79% position:50% size:63% +regularly attend meetings +like every month, every + +00:26:49.233 --> 00:26:53.533 align:middle line:79% position:50% size:50% +twice a month, +something like that. + +00:26:53.599 --> 00:26:55.900 align:middle line:79% position:50% size:60% +There's a whole bunch of +people that are slightly + +00:26:55.966 --> 00:27:01.533 align:middle line:79% position:50% size:68% +less active but that +still regularly contribute. + +00:27:01.599 --> 00:27:04.133 align:middle line:79% position:50% size:58% +As a rule comes up that +they are interested in, + +00:27:04.200 --> 00:27:05.099 align:middle line:85% position:50% size:55% +they'll drop a review. + +00:27:05.166 --> 00:27:09.000 align:middle line:79% position:50% size:60% +That happens quite a +lot and I frequently get + +00:27:09.066 --> 00:27:12.433 align:middle line:79% position:50% size:58% +surprised by new people +who have come out of + +00:27:12.500 --> 00:27:14.700 align:middle line:79% position:50% size:68% +nowhere and +submitted some information. + +00:27:17.099 --> 00:27:18.033 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Thanks. + +00:27:18.099 --> 00:27:19.033 align:middle line:79% position:50% size:58% +Yeah, I just wanted to +give a little bit of an + +00:27:19.099 --> 00:27:22.400 align:middle line:79% position:50% size:58% +idea and people are, as +you were saying, Wilco, + +00:27:22.466 --> 00:27:24.000 align:middle line:79% position:50% size:45% +participating +in different ways. + +00:27:24.066 --> 00:27:26.500 align:middle line:79% position:50% size:80% +Some people are attending +the calls and discussing things. + +00:27:26.566 --> 00:27:29.200 align:middle line:79% position:50% size:60% +Others completely +asynchronously, they are + +00:27:29.266 --> 00:27:33.700 align:middle line:85% position:50% size:65% +using the GitHub platform. + +00:27:33.766 --> 00:27:37.400 align:middle line:79% position:50% size:65% +>> WILCO FIERS: Yeah, +it's - it takes relatively + +00:27:37.466 --> 00:27:39.766 align:middle line:79% position:50% size:58% +little time for +anybody to participate. + +00:27:39.833 --> 00:27:42.500 align:middle line:79% position:50% size:70% +You can spend as little +or as much time as you want. 
+ +00:27:42.566 --> 00:27:46.366 align:middle line:79% position:50% size:65% +I do also think it's worth +mentioning that just from + +00:27:46.433 --> 00:27:49.599 align:middle line:79% position:50% size:63% +the people that have +joined over the last year + +00:27:49.666 --> 00:27:54.000 align:middle line:79% position:50% size:63% +maybe, year and half, I +frequently hear that they + +00:27:54.066 --> 00:27:58.566 align:middle line:79% position:50% size:70% +join and they find it a lot +of fun and they learn a lot. + +00:27:58.633 --> 00:28:01.599 align:middle line:79% position:50% size:60% +It is a group where you +get to learn quite a lot + +00:28:01.666 --> 00:28:04.033 align:middle line:85% position:50% size:65% +of in-depth accessibility. + +00:28:04.099 --> 00:28:07.099 align:middle line:79% position:50% size:68% +So, if you're interested +in the nitty gritty details + +00:28:07.166 --> 00:28:09.200 align:middle line:79% position:50% size:53% +of accessibility, +this is really a good + +00:28:09.266 --> 00:28:11.099 align:middle line:79% position:50% size:45% +group for you +to participate in. + +00:28:11.166 --> 00:28:13.933 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Great. + +00:28:14.000 --> 00:28:16.666 align:middle line:79% position:50% size:60% +So, one of the questions +that we have is - I'll + +00:28:16.733 --> 00:28:20.599 align:middle line:79% position:50% size:65% +read it aloud - WCAG is +supposed to be interpreted + +00:28:20.666 --> 00:28:23.333 align:middle line:79% position:50% size:48% +to be relevant in +different contexts. + +00:28:23.400 --> 00:28:25.933 align:middle line:79% position:50% size:75% +In one country, users +might be using older assistive + +00:28:26.000 --> 00:28:28.799 align:middle line:79% position:50% size:68% +technology for example and +it would be relevant to use + +00:28:28.866 --> 00:28:31.466 align:middle line:79% position:50% size:60% +all the techniques +to meet the requirement. + +00:28:31.533 --> 00:28:33.833 align:middle line:79% position:50% size:63% +Then in a country where +all users have the latest + +00:28:33.900 --> 00:28:37.666 align:middle line:79% position:50% size:65% +versions of assistive +technology, in practice it + +00:28:37.733 --> 00:28:41.400 align:middle line:79% position:50% size:65% +means a solution that is +good enough in Germany for + +00:28:41.466 --> 00:28:45.400 align:middle line:79% position:50% size:70% +example, might not be good +enough in Italy for example. + +00:28:45.466 --> 00:28:46.500 align:middle line:85% position:50% size:43% +And that is fine. + +00:28:46.566 --> 00:28:49.166 align:middle line:79% position:50% size:60% +So, how do the ACT Rules +take this into account? + +00:28:54.933 --> 00:28:56.533 align:middle line:85% position:50% size:20% +Silence? + +00:28:56.599 --> 00:28:58.533 align:middle line:79% position:50% size:65% +>> WILCO FIERS: +You want to pick somebody? + +00:28:58.599 --> 00:29:01.066 align:middle line:79% position:50% size:63% +I mean I can answer all +of them if you want, but. + +00:29:01.133 --> 00:29:02.400 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Go ahead. + +00:29:02.466 --> 00:29:06.400 align:middle line:79% position:50% size:70% +>> WILCO FIERS: Okay, so, +it's a really good question. + +00:29:06.466 --> 00:29:07.233 align:middle line:85% position:50% size:25% +Thank you. + +00:29:07.299 --> 00:29:11.133 align:middle line:79% position:50% size:40% +How do ACT Rules +deal with that? 
+ +00:29:11.200 --> 00:29:13.799 align:middle line:79% position:50% size:53% +ACT Rules have an +accessibility support + +00:29:13.866 --> 00:29:18.466 align:middle line:79% position:50% size:60% +section, so if we know +about these differences, + +00:29:18.533 --> 00:29:21.400 align:middle line:79% position:50% size:53% +we will mention +them in that section. + +00:29:21.466 --> 00:29:24.433 align:middle line:79% position:50% size:65% +You can find information +about things that may work + +00:29:24.500 --> 00:29:28.599 align:middle line:79% position:50% size:50% +slightly differently +depending on which + +00:29:28.666 --> 00:29:30.266 align:middle line:85% position:50% size:65% +technology you were using. + +00:29:30.333 --> 00:29:32.966 align:middle line:79% position:50% size:65% +That information is +documented in the rule and + +00:29:33.033 --> 00:29:37.299 align:middle line:79% position:50% size:65% +if you happen to know any +that isn't, please open an + +00:29:37.366 --> 00:29:38.933 align:middle line:85% position:50% size:63% +issue and we will add it. + +00:29:39.000 --> 00:29:43.466 align:middle line:79% position:50% size:40% +It belongs in +those documents. + +00:29:43.533 --> 00:29:45.233 align:middle line:79% position:50% size:55% +I also think it's +worth mentioning those + +00:29:45.299 --> 00:29:47.566 align:middle line:79% position:50% size:45% +differences aren't +that big anymore. + +00:29:50.266 --> 00:29:53.266 align:middle line:79% position:50% size:60% +The types of assistive +technologies that people + +00:29:53.333 --> 00:29:57.166 align:middle line:79% position:50% size:55% +are using tend to +be international ones. + +00:29:57.233 --> 00:30:00.500 align:middle line:79% position:50% size:63% +There are only a handful +of big players out there, + +00:30:00.566 --> 00:30:03.700 align:middle line:79% position:50% size:55% +so the differences +really aren't that big + +00:30:03.766 --> 00:30:06.000 align:middle line:85% position:50% size:48% +anymore these days. + +00:30:06.066 --> 00:30:11.700 align:middle line:79% position:50% size:63% +What does matter a little +bit is whether or not you + +00:30:11.766 --> 00:30:14.099 align:middle line:85% position:50% size:53% +exclude some of them. + +00:30:14.166 --> 00:30:17.400 align:middle line:79% position:50% size:60% +So, for example, if you +want to exclude a common + +00:30:17.466 --> 00:30:21.799 align:middle line:79% position:50% size:63% +screen reader on the Mac, +you might have slightly + +00:30:21.866 --> 00:30:26.966 align:middle line:79% position:50% size:70% +different tasks then if +you wanted to include those. + +00:30:27.033 --> 00:30:30.799 align:middle line:79% position:50% size:70% +The rule documents this +kind of information for you. + +00:30:30.866 --> 00:30:39.133 align:middle line:79% position:50% size:65% +That freedom is +available within the rule. + +00:30:39.200 --> 00:30:39.799 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Thank you. + +00:30:39.866 --> 00:30:41.799 align:middle line:79% position:50% size:50% +Kasper, I think +this one is for you. + +00:30:41.866 --> 00:30:46.066 align:middle line:79% position:50% size:75% +So, do ACT Rules only +cover accessibility tests that + +00:30:46.133 --> 00:30:49.400 align:middle line:79% position:50% size:53% +can be semi-automated +or fully automated? 
+
+00:30:49.466 --> 00:30:54.099 align:middle line:79% position:50% size:63%
+Or are there also
+rules that clarify rather
+
+00:30:54.166 --> 00:30:58.933 align:middle line:79% position:50% size:60%
+subjective WCAG criteria
+like 2.4.6, Headings and
+
+00:30:59.000 --> 00:31:01.200 align:middle line:79% position:50% size:55%
+Labels, that needs to
+be manually evaluated?
+
+00:31:03.733 --> 00:31:05.000 align:middle line:79% position:50% size:65%
+>> KASPER ISAGER:
+You know, I can definitely
+
+00:31:05.066 --> 00:31:05.900 align:middle line:85% position:50% size:65%
+provide an answer to that.
+
+00:31:05.966 --> 00:31:09.533 align:middle line:79% position:50% size:60%
+And the answer is yes,
+we also have affordances
+
+00:31:09.599 --> 00:31:10.766 align:middle line:85% position:50% size:43%
+for manual rules.
+
+00:31:10.833 --> 00:31:15.533 align:middle line:79% position:50% size:60%
+I also mentioned we
+did set out to implement
+
+00:31:15.599 --> 00:31:19.233 align:middle line:79% position:50% size:65%
+fifteen fully manual
+rules, so rules that could
+
+00:31:19.299 --> 00:31:22.400 align:middle line:79% position:50% size:60%
+not reasonably be
+implemented in automated
+
+00:31:22.466 --> 00:31:25.033 align:middle line:79% position:50% size:55%
+tools but could be
+implemented as part of
+
+00:31:25.099 --> 00:31:27.233 align:middle line:85% position:50% size:65%
+manual test methodologies.
+
+00:31:27.299 --> 00:31:30.599 align:middle line:79% position:50% size:58%
+I would say, the manual
+rules are slightly more
+
+00:31:30.666 --> 00:31:33.700 align:middle line:79% position:50% size:55%
+difficult in the sense
+that you still have to
+
+00:31:33.766 --> 00:31:36.400 align:middle line:79% position:50% size:50%
+follow the ACT Rules
+format, of course.
+
+00:31:36.466 --> 00:31:39.700 align:middle line:79% position:50% size:63%
+And the ACT Rules format
+is fairly strict on what
+
+00:31:39.766 --> 00:31:42.000 align:middle line:79% position:50% size:45%
+kind of rules you
+are allowed to write.
+
+00:31:42.066 --> 00:31:45.533 align:middle line:79% position:50% size:63%
+One of the big things
+is that the applicability
+
+00:31:45.599 --> 00:31:48.466 align:middle line:79% position:50% size:60%
+section of the rules, so
+the description of what
+
+00:31:48.533 --> 00:31:51.799 align:middle line:79% position:50% size:63%
+kinds of content the rule
+applies to has to be both
+
+00:31:51.866 --> 00:31:54.599 align:middle line:79% position:50% size:65%
+completely
+unambiguous and objective.
+
+00:31:54.666 --> 00:31:57.633 align:middle line:79% position:50% size:53%
+And for some of the
+success criteria, the
+
+00:31:57.700 --> 00:32:01.900 align:middle line:79% position:50% size:55%
+objectiveness criteria
+is really difficult to
+
+00:32:01.966 --> 00:32:04.233 align:middle line:85% position:50% size:50%
+meet, it has proved.
+
+00:32:04.299 --> 00:32:09.633 align:middle line:79% position:50% size:58%
+So, yes, you can author
+manual rules using the
+
+00:32:09.700 --> 00:32:11.066 align:middle line:85% position:50% size:43%
+ACT Rules format.
+ +00:32:11.133 --> 00:32:13.400 align:middle line:79% position:50% size:58% +But it may be slightly +more difficult that you + +00:32:13.466 --> 00:32:15.266 align:middle line:79% position:50% size:58% +still have to +remain objective in the + +00:32:15.333 --> 00:32:18.200 align:middle line:79% position:50% size:55% +applicability section +of the rule and that's + +00:32:18.266 --> 00:32:21.466 align:middle line:79% position:50% size:63% +difficult to do with some +of the more, I would say, + +00:32:21.533 --> 00:32:23.633 align:middle line:79% position:50% size:43% +subjective WCAG +success criteria. + +00:32:28.333 --> 00:32:29.599 align:middle line:79% position:50% size:53% +>> SHADI ABOU-ZAHRA: +Right, but that's - I + +00:32:29.666 --> 00:32:31.266 align:middle line:79% position:50% size:50% +think an important +- thank you, Kasper. + +00:32:31.333 --> 00:32:33.233 align:middle line:79% position:50% size:65% +I think that's an +important question because + +00:32:33.299 --> 00:32:35.466 align:middle line:79% position:50% size:55% +it also came up in the +questions that people + +00:32:35.533 --> 00:32:37.466 align:middle line:79% position:50% size:45% +asked in the +registration form. + +00:32:37.533 --> 00:32:41.200 align:middle line:79% position:50% size:65% +I think a lot of people +think about when they hear + +00:32:41.266 --> 00:32:44.400 align:middle line:79% position:50% size:63% +rules, they automatically +think of automated tools + +00:32:44.466 --> 00:32:46.799 align:middle line:79% position:50% size:60% +only and I think here +it's really important to + +00:32:46.866 --> 00:32:50.133 align:middle line:79% position:50% size:75% +emphasize that these +apply to both manual and not - + +00:32:50.200 --> 00:32:53.200 align:middle line:79% position:50% size:65% +in fact, actually, we +have several rules that we + +00:32:53.266 --> 00:32:56.299 align:middle line:79% position:50% size:55% +created as part of the +project that relate to + +00:32:56.366 --> 00:32:59.599 align:middle line:79% position:50% size:60% +that specific success +criteria on headings and + +00:32:59.666 --> 00:33:02.299 align:middle line:85% position:50% size:63% +we have others on labels. + +00:33:05.866 --> 00:33:10.900 align:middle line:79% position:50% size:58% +So, yes - it is done +and we also have manual + +00:33:10.966 --> 00:33:18.366 align:middle line:79% position:50% size:70% +methodology developers who +have been implementing these + +00:33:18.433 --> 00:33:21.266 align:middle line:79% position:50% size:70% +rules in their methodology, +comparing their methodology. + +00:33:21.333 --> 00:33:24.566 align:middle line:79% position:50% size:65% +Speaking of, this is a +question that also came up + +00:33:24.633 --> 00:33:27.599 align:middle line:85% position:50% size:63% +in the registration form. + +00:33:27.666 --> 00:33:29.799 align:middle line:79% position:50% size:40% +Carlos, maybe +this is for you. + +00:33:29.866 --> 00:33:34.599 align:middle line:79% position:50% size:60% +So, somebody wants to +create an implementation + +00:33:34.666 --> 00:33:38.700 align:middle line:79% position:50% size:63% +report for their tool +or for their methodology. + +00:33:38.766 --> 00:33:39.666 align:middle line:85% position:50% size:50% +How do they do that? 
+
+00:33:39.733 --> 00:33:41.866 align:middle line:79% position:50% size:65%
+Can you first explain what
+an implementation report
+
+00:33:41.933 --> 00:33:44.766 align:middle line:79% position:50% size:65%
+is and then how they
+actually do that, how they
+
+00:33:44.833 --> 00:33:46.366 align:middle line:85% position:50% size:63%
+provide such information?
+
+00:33:46.433 --> 00:33:50.133 align:middle line:79% position:50% size:43%
+>> CARLOS DUARTE:
+Yeah, sure.
+
+00:33:50.200 --> 00:33:53.433 align:middle line:79% position:50% size:60%
+So, perhaps I need to
+start by providing a bit
+
+00:33:53.500 --> 00:33:58.233 align:middle line:79% position:50% size:70%
+of context on what's inside
+an ACT Rule and specifically
+
+00:33:58.299 --> 00:34:02.066 align:middle line:79% position:50% size:68%
+an ACT Rule besides
+having an applicability and
+
+00:34:02.133 --> 00:34:05.266 align:middle line:79% position:50% size:45%
+expectation has
+a set of examples.
+
+00:34:05.333 --> 00:34:11.633 align:middle line:79% position:50% size:63%
+Some of them are examples
+of elements, documents,
+
+00:34:11.699 --> 00:34:16.433 align:middle line:79% position:50% size:78%
+pages that meet the
+criteria and so pass the rules.
+
+00:34:16.500 --> 00:34:20.266 align:middle line:79% position:50% size:63%
+Some of them are failed
+examples and some of them
+
+00:34:20.333 --> 00:34:23.500 align:middle line:79% position:50% size:65%
+are examples of pieces of
+code that are inapplicable
+
+00:34:23.566 --> 00:34:25.233 align:middle line:85% position:50% size:35%
+for that rule.
+
+00:34:25.300 --> 00:34:29.566 align:middle line:79% position:50% size:63%
+So, what we expect to
+have in an implementation
+
+00:34:29.633 --> 00:34:34.666 align:middle line:79% position:50% size:60%
+report is how well does
+your implementation fare
+
+00:34:34.733 --> 00:34:36.199 align:middle line:85% position:50% size:58%
+against those examples?
+
+00:34:36.266 --> 00:34:39.900 align:middle line:79% position:50% size:45%
+So, does it pass
+the test examples?
+
+00:34:39.966 --> 00:34:42.466 align:middle line:79% position:50% size:40%
+Does it fail the
+failed examples?
+
+00:34:42.533 --> 00:34:46.800 align:middle line:79% position:50% size:63%
+And does it not consider
+the inapplicable examples
+
+00:34:46.866 --> 00:34:48.566 align:middle line:85% position:50% size:38%
+of such a rule?
+
+00:34:48.633 --> 00:34:51.500 align:middle line:79% position:50% size:53%
+So, an implementation
+report is basically a
+
+00:34:51.566 --> 00:34:57.133 align:middle line:79% position:50% size:68%
+report of the outcomes of
+your implementation for the
+
+00:34:57.199 --> 00:35:03.333 align:middle line:79% position:50% size:73%
+different test cases that
+are described in an ACT Rule.
+
+00:35:03.400 --> 00:35:07.533 align:middle line:79% position:50% size:65%
+How can you submit one and
+how can you prepare one?
+
+00:35:07.599 --> 00:35:14.633 align:middle line:79% position:50% size:70%
+Well, we expect your reports
+to be provided using EARL.
+
+00:35:14.699 --> 00:35:20.066 align:middle line:79% position:50% size:80%
+The Evaluation And Report
+Language, serialized in JSON-LD.
+ +00:35:20.133 --> 00:35:23.266 align:middle line:79% position:50% size:60% +So, if you do provide us +that, we can import that + +00:35:23.333 --> 00:35:28.199 align:middle line:79% position:50% size:60% +into our website and +display it together with + +00:35:28.266 --> 00:35:33.333 align:middle line:79% position:50% size:58% +the current displayed +implementation reports. + +00:35:33.400 --> 00:35:37.133 align:middle line:79% position:50% size:63% +We have a section on +the website, so if you go + +00:35:37.199 --> 00:35:41.266 align:middle line:79% position:50% size:68% +online to the website, +there's a section dedicated + +00:35:41.333 --> 00:35:43.166 align:middle line:79% position:50% size:63% +specifically to +implementations and there + +00:35:43.233 --> 00:35:44.699 align:middle line:79% position:50% size:50% +are instructions +there on how you can + +00:35:44.766 --> 00:35:48.800 align:middle line:85% position:50% size:63% +submit an implementation. + +00:35:48.866 --> 00:35:53.099 align:middle line:79% position:50% size:60% +And we have some tools +that you can use to help + +00:35:53.166 --> 00:35:56.166 align:middle line:79% position:50% size:55% +you prepare an +implementation report. + +00:35:56.233 --> 00:35:59.800 align:middle line:79% position:50% size:73% +We have all the test cases +for all the rules accessible. + +00:35:59.866 --> 00:36:04.833 align:middle line:79% position:50% size:68% +So, it can help you +prepare for implementation. + +00:36:04.900 --> 00:36:07.699 align:middle line:79% position:50% size:55% +If you have some +problems setting up an + +00:36:07.766 --> 00:36:10.699 align:middle line:79% position:50% size:63% +infrastructure for +preparing implementation, + +00:36:10.766 --> 00:36:14.533 align:middle line:79% position:50% size:63% +do reach out to us and we +should be able to assist + +00:36:14.599 --> 00:36:17.500 align:middle line:79% position:50% size:40% +you in that, +in your project. + +00:36:21.199 --> 00:36:22.400 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Thank you, Carlos. + +00:36:22.466 --> 00:36:26.066 align:middle line:79% position:50% size:58% +While I have you on +the line, we're kind of + +00:36:26.133 --> 00:36:27.099 align:middle line:85% position:50% size:50% +running out of time. + +00:36:27.166 --> 00:36:28.866 align:middle line:79% position:50% size:65% +There are more questions +coming so we might need to + +00:36:28.933 --> 00:36:32.666 align:middle line:79% position:50% size:55% +push some of these +towards the end of the + +00:36:32.733 --> 00:36:35.500 align:middle line:85% position:50% size:50% +day just to move on. + +00:36:35.566 --> 00:36:38.933 align:middle line:79% position:50% size:63% +But I think, maybe +briefly, Carlos, this one + +00:36:39.000 --> 00:36:43.933 align:middle line:79% position:50% size:58% +it says, can you give a +concrete example of the + +00:36:44.000 --> 00:36:49.566 align:middle line:79% position:50% size:68% +rule on should we have +alt text in an image if the + +00:36:49.633 --> 00:36:53.066 align:middle line:79% position:50% size:55% +text that is next to +the image is the same? + +00:36:53.133 --> 00:36:55.699 align:middle line:79% position:50% size:53% +I think it would help +to understand how you + +00:36:55.766 --> 00:36:56.933 align:middle line:85% position:50% size:58% +choose a specific rule. 
+
+00:36:57.000 --> 00:37:01.933 align:middle line:79% position:50% size:63%
+I think here, if you can
+explain the test cases in
+
+00:37:02.000 --> 00:37:06.733 align:middle line:79% position:50% size:63%
+the context here that are
+part of the rules to help
+
+00:37:06.800 --> 00:37:09.300 align:middle line:79% position:50% size:60%
+you understand what the
+rule actually refers to.
+
+00:37:13.733 --> 00:37:19.133 align:middle line:79% position:50% size:65%
+>> CARLOS DUARTE: Wilco,
+you seem to have the rules
+
+00:37:19.199 --> 00:37:21.566 align:middle line:79% position:50% size:35%
+in your head
+better than I.
+
+00:37:21.633 --> 00:37:24.900 align:middle line:79% position:50% size:65%
+Which - this is definitely
+coming from the example
+
+00:37:24.966 --> 00:37:26.800 align:middle line:79% position:50% size:45%
+that you gave in
+your presentation.
+
+00:37:26.866 --> 00:37:29.633 align:middle line:85% position:50% size:48%
+Can you point me -?
+
+00:37:29.699 --> 00:37:30.966 align:middle line:79% position:50% size:60%
+>> SHADI ABOU-ZAHRA:
+Sorry, we don't actually
+
+00:37:31.033 --> 00:37:31.833 align:middle line:79% position:50% size:40%
+have the time
+to go into this.
+
+00:37:31.900 --> 00:37:33.000 align:middle line:79% position:50% size:43%
+>> CARLOS DUARTE:
+Okay.
+
+00:37:33.066 --> 00:37:34.566 align:middle line:79% position:50% size:75%
+>> SHADI ABOU-ZAHRA: We
+could come back to this later.
+
+00:37:34.633 --> 00:37:36.699 align:middle line:85% position:50% size:65%
+So maybe let me summarize.
+
+00:37:36.766 --> 00:37:42.933 align:middle line:79% position:50% size:68%
+So, the point is that
+this rule, there would be a
+
+00:37:43.000 --> 00:37:46.366 align:middle line:79% position:50% size:53%
+rule that is talking
+about, let's say text
+
+00:37:46.433 --> 00:37:50.066 align:middle line:79% position:50% size:58%
+alternatives for images
+and these will consider
+
+00:37:50.133 --> 00:37:53.133 align:middle line:79% position:50% size:60%
+different techniques and
+the examples, the test
+
+00:37:53.199 --> 00:37:57.666 align:middle line:79% position:50% size:63%
+cases in the rule would
+have an example when what
+
+00:37:57.733 --> 00:38:01.033 align:middle line:79% position:50% size:75%
+happens if there is a
+description next to the image.
+
+00:38:01.099 --> 00:38:04.733 align:middle line:79% position:50% size:58%
+What in that case, what
+the alt text should be?
+
+00:38:04.800 --> 00:38:07.466 align:middle line:79% position:50% size:58%
+So, if we were to write
+the rule for a good alt
+
+00:38:07.533 --> 00:38:12.866 align:middle line:79% position:50% size:65%
+text, so the rule that we
+have right now only checks
+
+00:38:12.933 --> 00:38:16.400 align:middle line:79% position:50% size:63%
+for alt text
+whether it exists or not.
+
+00:38:16.466 --> 00:38:18.633 align:middle line:79% position:50% size:63%
+And if we would have a
+rule that checks how good
+
+00:38:18.699 --> 00:38:20.733 align:middle line:79% position:50% size:60%
+an alt text is, it would
+be - we would have test
+
+00:38:20.800 --> 00:38:25.066 align:middle line:79% position:50% size:58%
+cases where we would
+have different types of
+
+00:38:25.133 --> 00:38:30.166 align:middle line:79% position:50% size:63%
+situations and so that
+people could compare test
+
+00:38:30.233 --> 00:38:31.766 align:middle line:85% position:50% size:48%
+cases towards that.
+
+00:38:31.833 --> 00:38:34.300 align:middle line:79% position:50% size:55%
+Again, we could come
+back to this later on.
+
+00:38:34.366 --> 00:38:38.866 align:middle line:79% position:50% size:65%
+We are kind of running
+out of time but one of the
+
+00:38:38.933 --> 00:38:47.866 align:middle line:79% position:50% size:55%
+things that was asked
+is, do rules cover all
+
+00:38:47.933 --> 00:38:49.000 align:middle line:85% position:50% size:60%
+of the success criteria?
+
+00:38:49.066 --> 00:38:54.066 align:middle line:79% position:50% size:73%
+Or will the work
+continue to cover everything?
+
+00:38:57.533 --> 00:38:59.800 align:middle line:79% position:50% size:35%
+Who wants to
+take that one?
+
+00:38:59.866 --> 00:39:01.533 align:middle line:79% position:50% size:38%
+>> WILCO FIERS:
+I'll go.
+
+00:39:01.599 --> 00:39:05.633 align:middle line:79% position:50% size:63%
+So, we haven't completed
+all the success criteria.
+
+00:39:05.699 --> 00:39:10.900 align:middle line:79% position:50% size:58%
+I don't know if we ever
+will fully complete
+
+00:39:10.966 --> 00:39:12.900 align:middle line:79% position:50% size:48%
+everything but we
+certainly intend to
+
+00:39:12.966 --> 00:39:16.466 align:middle line:79% position:50% size:45%
+continue to go
+on with this work.
+
+00:39:16.533 --> 00:39:20.300 align:middle line:79% position:50% size:63%
+WAI-Tools Project is over
+but the ACT work is not.
+
+00:39:20.366 --> 00:39:24.333 align:middle line:79% position:50% size:63%
+This is going to be, you
+know, ongoing projects to
+
+00:39:24.400 --> 00:39:26.099 align:middle line:79% position:50% size:50%
+continue to
+extend the coverage.
+
+00:39:26.166 --> 00:39:32.266 align:middle line:79% position:50% size:65%
+Some of the things not
+currently done is so - our
+
+00:39:32.333 --> 00:39:37.866 align:middle line:79% position:50% size:68%
+main focus has been on HTML
+with CSS and SVG.
+
+00:39:37.933 --> 00:39:41.533 align:middle line:79% position:50% size:50%
+There are definitely
+still gaps in those,
+
+00:39:41.599 --> 00:39:44.266 align:middle line:79% position:50% size:58%
+specifically the things
+that are difficult to
+
+00:39:44.333 --> 00:39:46.433 align:middle line:79% position:50% size:53%
+automate, but beyond
+that, there are other
+
+00:39:46.500 --> 00:39:48.233 align:middle line:79% position:50% size:43%
+technologies to
+consider as well.
+
+00:39:48.300 --> 00:39:51.333 align:middle line:79% position:50% size:65%
+There are currently no
+rules for PDF for example.
+
+00:39:51.400 --> 00:39:53.266 align:middle line:79% position:50% size:58%
+That is certainly a
+thing that I would love
+
+00:39:53.333 --> 00:39:55.266 align:middle line:85% position:50% size:45%
+to see us develop.
+
+00:39:55.333 --> 00:39:59.900 align:middle line:79% position:50% size:60%
+So, there's definitely
+more work and it is also
+
+00:39:59.966 --> 00:40:03.733 align:middle line:79% position:50% size:63%
+good to mention that this
+is an ongoing process.
+
+00:40:03.800 --> 00:40:07.699 align:middle line:79% position:50% size:58%
+As technology develops,
+as the HTML standard
+
+00:40:07.766 --> 00:40:11.533 align:middle line:79% position:50% size:55%
+develops, as assistive
+technologies develop,
+
+00:40:11.599 --> 00:40:14.500 align:middle line:79% position:50% size:60%
+these rules will
+continually get updated.
+ +00:40:14.566 --> 00:40:17.633 align:middle line:79% position:50% size:65% +So, unlike WCAG, these +rules are an ongoing thing + +00:40:17.699 --> 00:40:21.266 align:middle line:79% position:50% size:68% +that need to be maintained +so this work is never going + +00:40:21.333 --> 00:40:23.166 align:middle line:79% position:50% size:55% +to end and I'm going +to continue doing this + +00:40:23.233 --> 00:40:28.166 align:middle line:85% position:50% size:48% +until I retire, so. + +00:40:28.233 --> 00:40:29.800 align:middle line:79% position:50% size:58% +>> SHADI ABOU-ZAHRA: +Yeah, I like to use the + +00:40:29.866 --> 00:40:33.599 align:middle line:79% position:50% size:70% +metaphor of snowballs, +especially now in winter for + +00:40:33.666 --> 00:40:37.000 align:middle line:79% position:50% size:68% +many of us, so snowballing, +and I think this project + +00:40:37.066 --> 00:40:40.300 align:middle line:79% position:50% size:65% +started the snowball with +seventy rules and now it's + +00:40:40.366 --> 00:40:43.199 align:middle line:79% position:50% size:75% +really, as Carlos was +saying, now it's up to you the + +00:40:43.266 --> 00:40:46.433 align:middle line:79% position:50% size:48% +community to +help continue that. + +00:40:46.500 --> 00:40:48.699 align:middle line:79% position:50% size:58% +Speaking of, Wilco, +while I have you on the + +00:40:48.766 --> 00:40:50.366 align:middle line:79% position:50% size:48% +line, there was +a question for you. + +00:40:50.433 --> 00:40:52.833 align:middle line:79% position:50% size:43% +This might be a +misunderstanding. + +00:40:52.900 --> 00:40:57.199 align:middle line:79% position:50% size:55% +Can you talk about how +authoritative the ACT + +00:40:57.266 --> 00:41:00.699 align:middle line:79% position:50% size:55% +Rules are versus the +WCAG success criteria? + +00:41:00.766 --> 00:41:03.800 align:middle line:79% position:50% size:65% +What is the normative part +and what is informative? + +00:41:03.866 --> 00:41:05.966 align:middle line:79% position:50% size:70% +>> WILCO FIERS: +That's a fantastic question. + +00:41:06.033 --> 00:41:11.699 align:middle line:79% position:50% size:63% +So, the WCAG success +criteria are published by + +00:41:11.766 --> 00:41:14.533 align:middle line:79% position:50% size:38% +the W3C as a +Recommendation. + +00:41:14.599 --> 00:41:17.599 align:middle line:79% position:50% size:58% +They've gone through +an extensive process of + +00:41:17.666 --> 00:41:21.533 align:middle line:79% position:50% size:50% +public reviews by +the W3C and they are + +00:41:21.599 --> 00:41:24.033 align:middle line:79% position:50% size:50% +an internationally +recognized standard. + +00:41:24.099 --> 00:41:28.900 align:middle line:79% position:50% size:45% +So, those are +solid as anything. + +00:41:28.966 --> 00:41:35.300 align:middle line:79% position:50% size:65% +ACT Rules are +written by smaller groups. + +00:41:35.366 --> 00:41:41.366 align:middle line:79% position:50% size:65% +They go through a less +involved process of review + +00:41:41.433 --> 00:41:47.500 align:middle line:79% position:50% size:60% +and they get published +still by the W3C but not + +00:41:47.566 --> 00:41:49.366 align:middle line:85% position:50% size:60% +by the whole of the W3C. 
+ +00:41:49.433 --> 00:41:55.099 align:middle line:79% position:50% size:63% +There is no massive +W3C-wide review but there + +00:41:55.166 --> 00:41:58.033 align:middle line:79% position:50% size:60% +is a review and an +approval that comes from + +00:41:58.099 --> 00:42:00.533 align:middle line:79% position:50% size:60% +the Accessibility +Guidelines Working Group + +00:42:00.599 --> 00:42:05.000 align:middle line:79% position:50% size:40% +which are the +authors of WCAG. + +00:42:05.066 --> 00:42:09.366 align:middle line:79% position:50% size:63% +So, you can see them as +these are rules published + +00:42:09.433 --> 00:42:13.300 align:middle line:79% position:50% size:58% +by the authors of WCAG, +whereas WCAG itself is + +00:42:13.366 --> 00:42:16.333 align:middle line:79% position:50% size:60% +published by the W3C as +a standards organization + +00:42:16.400 --> 00:42:19.133 align:middle line:85% position:50% size:38% +as a whole, so. + +00:42:19.199 --> 00:42:20.866 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Thanks. + +00:42:20.933 --> 00:42:22.199 align:middle line:79% position:50% size:45% +We call these - +internally we call + +00:42:22.266 --> 00:42:23.900 align:middle line:85% position:50% size:65% +them supporting documents. + +00:42:23.966 --> 00:42:27.833 align:middle line:79% position:50% size:63% +These are the so-called +Techniques documents, the + +00:42:27.900 --> 00:42:30.099 align:middle line:79% position:50% size:58% +Understanding documents +- Understanding WCAG 2. + +00:42:30.166 --> 00:42:32.000 align:middle line:79% position:50% size:55% +So, with every success +criteria, there is a + +00:42:32.066 --> 00:42:35.833 align:middle line:79% position:50% size:60% +document associated with +it called Understanding + +00:42:35.900 --> 00:42:40.666 align:middle line:79% position:50% size:63% +Success Criteria XYZ that +explains more background + +00:42:40.733 --> 00:42:42.533 align:middle line:85% position:50% size:63% +and would help and so on. + +00:42:42.599 --> 00:42:44.566 align:middle line:79% position:50% size:58% +Then there are several +Techniques, how you can + +00:42:44.633 --> 00:42:48.366 align:middle line:79% position:50% size:65% +meet that requirement, +and now we also have tests + +00:42:48.433 --> 00:42:50.766 align:middle line:79% position:50% size:43% +associated with +success criteria. + +00:42:50.833 --> 00:42:54.300 align:middle line:79% position:50% size:68% +These supporting documents +are informative, so they're + +00:42:54.366 --> 00:42:57.233 align:middle line:79% position:50% size:70% +not normative like the +success criteria themselves. + +00:42:57.300 --> 00:43:00.599 align:middle line:79% position:50% size:68% +That is what you have +to meet but these are all - + +00:43:00.666 --> 00:43:03.366 align:middle line:79% position:50% size:58% +all these supporting +documents help you make + +00:43:03.433 --> 00:43:05.433 align:middle line:79% position:50% size:45% +sure that you meet +the requirements. + +00:43:05.500 --> 00:43:06.699 align:middle line:85% position:50% size:13% +Good. + +00:43:06.766 --> 00:43:08.633 align:middle line:79% position:50% size:58% +I will take the liberty +of answering two more + +00:43:08.699 --> 00:43:10.466 align:middle line:79% position:50% size:48% +questions and +then we'll move on. 
+ +00:43:10.533 --> 00:43:14.066 align:middle line:79% position:50% size:60% +One question was +when will WCAG 2.2, when + +00:43:14.133 --> 00:43:15.966 align:middle line:85% position:50% size:50% +will it be released? + +00:43:16.033 --> 00:43:18.266 align:middle line:79% position:50% size:60% +That was a teaser from +you, Carlos, because you + +00:43:18.333 --> 00:43:19.400 align:middle line:79% position:50% size:38% +mentioned that +in your slides. + +00:43:19.466 --> 00:43:23.333 align:middle line:79% position:50% size:63% +Currently, it's planned +for mid-2021, so mid this + +00:43:23.400 --> 00:43:26.800 align:middle line:79% position:50% size:60% +year we expect the new +version of WCAG 2.2, and + +00:43:26.866 --> 00:43:30.533 align:middle line:79% position:50% size:63% +hopefully with as many +test rules as possible as + +00:43:30.599 --> 00:43:32.599 align:middle line:79% position:50% size:53% +part of the +supporting documents. + +00:43:32.666 --> 00:43:35.133 align:middle line:79% position:50% size:73% +And then there was +another really good question. + +00:43:35.199 --> 00:43:38.966 align:middle line:79% position:50% size:60% +When can we expect - +can we expect the WCAG 3 + +00:43:39.033 --> 00:43:43.233 align:middle line:79% position:50% size:65% +guidelines to follow the +same approach as ACT Rules + +00:43:43.300 --> 00:43:46.133 align:middle line:79% position:50% size:68% +so WCAG 3 success +criteria can be tested more + +00:43:46.199 --> 00:43:49.466 align:middle line:79% position:50% size:65% +objectively with +automation where relevant? + +00:43:49.533 --> 00:43:52.233 align:middle line:79% position:50% size:65% +This is a really good +question because just this + +00:43:52.300 --> 00:43:55.933 align:middle line:79% position:50% size:60% +- just last week on +Thursday, fresh, hot off + +00:43:56.000 --> 00:44:01.466 align:middle line:79% position:50% size:65% +the press, we, the W3C, +published the first public + +00:44:01.533 --> 00:44:04.133 align:middle line:85% position:50% size:45% +draft of WCAG 3.0. + +00:44:04.199 --> 00:44:07.400 align:middle line:79% position:50% size:58% +This is still a very, +very early draft and we + +00:44:07.466 --> 00:44:08.500 align:middle line:85% position:50% size:63% +would love your comments. + +00:44:08.566 --> 00:44:11.699 align:middle line:79% position:50% size:53% +We, of course, want +to make WCAG 3.0 more + +00:44:11.766 --> 00:44:15.699 align:middle line:79% position:50% size:58% +testable and so we +want to look at as much + +00:44:15.766 --> 00:44:18.833 align:middle line:79% position:50% size:55% +approaches, as much +carry-over as possible + +00:44:18.900 --> 00:44:20.000 align:middle line:85% position:50% size:43% +of the ACT Rules. + +00:44:20.066 --> 00:44:25.000 align:middle line:79% position:50% size:73% +At the same time, WCAG 3 has +a very new conformance model. + +00:44:25.066 --> 00:44:28.933 align:middle line:79% position:50% size:50% +So, they are looking +at different ways of + +00:44:29.000 --> 00:44:33.633 align:middle line:79% position:50% size:70% +evaluating, so not just +binary but maybe on a scale. 
+ +00:44:33.699 --> 00:44:36.533 align:middle line:79% position:50% size:65% +This is maybe a topic for +another webinar about WCAG + +00:44:36.599 --> 00:44:41.666 align:middle line:79% position:50% size:65% +3.0 but the thing is, yes, +we are working closely to + +00:44:41.733 --> 00:44:44.333 align:middle line:79% position:50% size:53% +try to get - improve +the testing in future + +00:44:44.400 --> 00:44:45.733 align:middle line:85% position:50% size:43% +versions of WCAG. diff --git a/content-images/about/wai-tools/video3.vtt b/content-images/about/wai-tools/video3.vtt new file mode 100644 index 00000000000..ae5b1ea7b81 --- /dev/null +++ b/content-images/about/wai-tools/video3.vtt @@ -0,0 +1,1137 @@ +WEBVTT + +00:00:00.600 --> 00:00:02.233 align:middle line:79% position:50% size:68% +>> SHADI ABOU-ZAHRA: +The Portuguese observatory. + +00:00:02.299 --> 00:00:10.000 align:middle line:79% position:50% size:65% +So, Jorge, do you want to +introduce yourself briefly + +00:00:10.066 --> 00:00:12.066 align:middle line:79% position:50% size:48% +and then +please get started. + +00:00:18.633 --> 00:00:20.633 align:middle line:85% position:50% size:45% +And you are muted. + +00:00:30.066 --> 00:00:30.866 align:middle line:79% position:50% size:48% +>> JORGE FERNANDES: +Sorry. + +00:00:30.933 --> 00:00:35.966 align:middle line:79% position:50% size:65% +Because the button changed +the position of the mic. + +00:00:36.033 --> 00:00:45.000 align:middle line:79% position:50% size:50% +Let me also put +my camera to see me. + +00:00:45.066 --> 00:00:46.399 align:middle line:85% position:50% size:13% +Okay. + +00:00:46.466 --> 00:00:48.066 align:middle line:85% position:50% size:33% +Hi, everyone. + +00:00:48.133 --> 00:00:55.100 align:middle line:79% position:50% size:65% +I'm Jorge Fernandes from +the Portuguese Agency, the + +00:00:55.166 --> 00:01:01.000 align:middle line:79% position:50% size:53% +AMA, Administrative +Modernization Agency. + +00:01:01.066 --> 00:01:05.299 align:middle line:79% position:50% size:65% +We are responsible for the +monitorization in Portugal + +00:01:05.366 --> 00:01:07.500 align:middle line:85% position:50% size:63% +of the web accessibility. + +00:01:07.566 --> 00:01:10.433 align:middle line:79% position:50% size:53% +We are the body +responsible for that. + +00:01:10.500 --> 00:01:14.066 align:middle line:79% position:50% size:63% +We belong to the Ministry +of the Modernization of + +00:01:14.133 --> 00:01:17.366 align:middle line:79% position:50% size:40% +State and Public +Administration. + +00:01:17.433 --> 00:01:23.033 align:middle line:79% position:50% size:75% +And, well, I'm working in +the field of web accessibility + +00:01:23.099 --> 00:01:30.299 align:middle line:79% position:50% size:65% +since 1999, so in the +beginning of the WCAG 1.0. + +00:01:30.366 --> 00:01:34.766 align:middle line:79% position:50% size:78% +And I'm here to present +you the Portuguese observatory. + +00:01:34.833 --> 00:01:36.833 align:middle line:79% position:50% size:50% +I think I can +continue now, Shadi? + +00:01:44.666 --> 00:01:45.299 align:middle line:85% position:50% size:13% +Okay. + +00:01:45.366 --> 00:01:50.366 align:middle line:85% position:50% size:40% +In the -- sorry. + +00:01:55.166 --> 00:02:01.166 align:middle line:79% position:50% size:65% +I will try to get the +connection with my slides. + +00:02:01.233 --> 00:02:02.466 align:middle line:85% position:50% size:13% +Okay. + +00:02:02.533 --> 00:02:04.566 align:middle line:85% position:50% size:23% +Next one. 
+ +00:02:04.633 --> 00:02:10.566 align:middle line:79% position:50% size:50% +So in the Portuguese +legislation, we have + +00:02:10.633 --> 00:02:16.900 align:middle line:79% position:50% size:65% +explicitly a method of +evaluation mentioned based + +00:02:16.966 --> 00:02:21.133 align:middle line:79% position:50% size:60% +in the simplified +method of the directive. + +00:02:21.199 --> 00:02:27.766 align:middle line:79% position:50% size:60% +In the preamble of the +Implementation Act about + +00:02:27.833 --> 00:02:34.566 align:middle line:79% position:50% size:68% +monitoring, I +underlined these two ideas. + +00:02:34.633 --> 00:02:39.633 align:middle line:79% position:50% size:53% +The monitoring should +raise awareness and + +00:02:39.699 --> 00:02:45.099 align:middle line:79% position:50% size:55% +encourage learning in +Member States, and the + +00:02:45.166 --> 00:02:49.233 align:middle line:79% position:50% size:55% +overall results of the +monitoring activities + +00:02:49.300 --> 00:02:54.000 align:middle line:79% position:50% size:48% +should be made +publicly available. + +00:02:54.066 --> 00:03:01.199 align:middle line:79% position:50% size:80% +These must involve all entities, +all public sector bodies. + +00:03:01.266 --> 00:03:08.766 align:middle line:79% position:50% size:73% +And the monitoring is not +an external exercise to them. + +00:03:08.833 --> 00:03:14.300 align:middle line:79% position:50% size:73% +So it's something that they +need to do it by theirselves. + +00:03:14.366 --> 00:03:23.300 align:middle line:79% position:50% size:65% +That is one of the reasons +why we in Portugal abide + +00:03:23.366 --> 00:03:29.566 align:middle line:79% position:50% size:58% +the centralized model +of evaluation, a method + +00:03:29.633 --> 00:03:35.300 align:middle line:79% position:50% size:53% +of three steps to +each entity evaluate. + +00:03:35.366 --> 00:03:40.333 align:middle line:79% position:50% size:55% +The simplified method +based on automatic and + +00:03:40.400 --> 00:03:42.666 align:middle line:85% position:50% size:53% +semi-automatic tools. + +00:03:42.733 --> 00:03:48.866 align:middle line:79% position:50% size:73% +And in that what we call +an in-depth light is a manual + +00:03:48.933 --> 00:03:53.400 align:middle line:79% position:50% size:65% +evaluation based on a +checklist that we call the + +00:03:53.466 --> 00:03:59.000 align:middle line:79% position:50% size:73% +ten critical aspects of +the functional accessibility, + +00:03:59.066 --> 00:04:04.199 align:middle line:79% position:50% size:38% +such a kind of +W3C Easy Check. + +00:04:04.266 --> 00:04:10.233 align:middle line:79% position:50% size:53% +And, according to our +legislation, this is + +00:04:10.300 --> 00:04:14.300 align:middle line:79% position:50% size:53% +mandatory, these two +references, these two + +00:04:14.366 --> 00:04:20.733 align:middle line:79% position:50% size:63% +methods of evaluation, +and is recommended, also, + +00:04:20.800 --> 00:04:25.100 align:middle line:79% position:50% size:55% +usability tests with +people with disability + +00:04:25.166 --> 00:04:31.366 align:middle line:79% position:50% size:63% +with a minimum of one +task, one users' typology + +00:04:31.433 --> 00:04:42.133 align:middle line:79% position:50% size:78% +of the European standards, +like, you know, the EN 301 549. 
+ +00:04:42.199 --> 00:04:49.966 align:middle line:79% position:50% size:58% +About the simplified +method, as you know, in + +00:04:50.033 --> 00:04:56.466 align:middle line:79% position:50% size:63% +the Implementing Act, the +page sample needs to have + +00:04:56.533 --> 00:05:00.300 align:middle line:79% position:50% size:45% +a number of pages +appropriate to the + +00:05:00.366 --> 00:05:05.866 align:middle line:79% position:50% size:65% +estimated size and +complexity of the website. + +00:05:05.933 --> 00:05:10.833 align:middle line:79% position:50% size:68% +This is the main principle +mentioned in the Directive. + +00:05:10.899 --> 00:05:14.800 align:middle line:79% position:50% size:63% +In the Portuguese +proposal, the page sample + +00:05:14.866 --> 00:05:20.199 align:middle line:79% position:50% size:65% +is composed by the home +page plus all pages linked + +00:05:20.266 --> 00:05:22.600 align:middle line:85% position:50% size:48% +from the home page. + +00:05:22.666 --> 00:05:27.433 align:middle line:79% position:50% size:58% +From the historical +studies in Portugal, we + +00:05:27.500 --> 00:05:33.533 align:middle line:79% position:50% size:65% +know that this means +about 60 pages per website + +00:05:33.600 --> 00:05:36.100 align:middle line:85% position:50% size:63% +in public administration. + +00:05:36.166 --> 00:05:45.766 align:middle line:79% position:50% size:58% +Evaluate with automatic +tools to WCAG 2.1 AA. + +00:05:45.833 --> 00:05:50.466 align:middle line:79% position:50% size:65% +And the public sector body +can use any kind of tool, + +00:05:50.533 --> 00:05:57.066 align:middle line:79% position:50% size:55% +but our team will load +all the samples in the + +00:05:57.133 --> 00:06:01.300 align:middle line:79% position:50% size:65% +Portuguese observatory, +and all of them is public. + +00:06:06.366 --> 00:06:12.333 align:middle line:79% position:50% size:63% +All our tools used same +engine, the QualWeb, that + +00:06:12.399 --> 00:06:15.366 align:middle line:85% position:50% size:63% +already Carlos mentioned. + +00:06:15.433 --> 00:06:19.233 align:middle line:85% position:50% size:65% +We have the AccessMonitor. + +00:06:19.300 --> 00:06:23.033 align:middle line:79% position:50% size:48% +That is a tool +that produced a web + +00:06:23.100 --> 00:06:26.766 align:middle line:79% position:50% size:48% +accessibility +report of one page. + +00:06:26.833 --> 00:06:29.966 align:middle line:79% position:50% size:38% +That is also +in our website. + +00:06:30.033 --> 00:06:35.666 align:middle line:79% position:50% size:65% +And the observatory is an +awareness tool with global + +00:06:35.733 --> 00:06:39.600 align:middle line:79% position:50% size:33% +statistics of +the entities. + +00:06:39.666 --> 00:06:44.500 align:middle line:79% position:50% size:53% +If the entity want +to know more details, + +00:06:44.566 --> 00:06:48.100 align:middle line:85% position:50% size:50% +they need MyMonitor. + +00:06:48.166 --> 00:06:54.366 align:middle line:79% position:50% size:60% +So the observatory don't +give the detail of the + +00:06:54.433 --> 00:06:58.066 align:middle line:79% position:50% size:65% +information -- all +information of the sample. + +00:06:58.133 --> 00:07:02.933 align:middle line:79% position:50% size:60% +To do that, the entities +need to have -- to have + +00:07:03.000 --> 00:07:04.500 align:middle line:85% position:50% size:58% +access to another tool. 
+ +00:07:04.566 --> 00:07:06.966 align:middle line:85% position:50% size:55% +That is the MyMonitor. + +00:07:07.033 --> 00:07:14.033 align:middle line:79% position:50% size:65% +So we give them the +MyMonitor, and, of course, + +00:07:14.100 --> 00:07:18.600 align:middle line:79% position:50% size:63% +we include our contact +network of people that is + +00:07:18.666 --> 00:07:22.266 align:middle line:85% position:50% size:53% +working in the field. + +00:07:22.333 --> 00:07:29.899 align:middle line:79% position:50% size:58% +So the impact of the +ACT Rules in our tools. + +00:07:29.966 --> 00:07:36.366 align:middle line:79% position:50% size:60% +Until now we change 20 +of 80 of our tests based + +00:07:36.433 --> 00:07:40.266 align:middle line:85% position:50% size:40% +in 13 ACT Rules. + +00:07:40.333 --> 00:07:47.033 align:middle line:79% position:50% size:68% +Get a more comprehensive +analysis, a post-processing + +00:07:47.100 --> 00:07:52.899 align:middle line:79% position:50% size:65% +browser analysis, see more +-- at the moment, our tool + +00:07:52.966 --> 00:07:56.133 align:middle line:85% position:50% size:58% +see more HTML elements. + +00:07:56.199 --> 00:08:03.466 align:middle line:79% position:50% size:73% +Our tool is also ready to +translate to other languages. + +00:08:03.533 --> 00:08:09.766 align:middle line:79% position:50% size:60% +The output is in +EARL and the CSV format. + +00:08:09.833 --> 00:08:16.899 align:middle line:79% position:50% size:73% +We can output all the +results in these two formats. + +00:08:16.966 --> 00:08:19.933 align:middle line:85% position:50% size:65% +The tools are open source. + +00:08:20.000 --> 00:08:23.366 align:middle line:79% position:50% size:45% +All the source +are in the GitHub. + +00:08:23.433 --> 00:08:35.033 align:middle line:79% position:50% size:68% +You can find them at +the amagovpt.github.io/eed. + +00:08:35.100 --> 00:08:40.933 align:middle line:79% position:50% size:65% +It is the local where +you found the source code. + +00:08:41.000 --> 00:08:45.566 align:middle line:79% position:50% size:63% +So the biggest impact of +the WAI-Tools Project are + +00:08:45.633 --> 00:08:47.833 align:middle line:85% position:50% size:45% +in the test rules. + +00:08:47.899 --> 00:08:54.833 align:middle line:79% position:50% size:73% +Just to mention two examples, +the headings and the images + +00:08:54.899 --> 00:09:01.166 align:middle line:79% position:50% size:68% +that we already spoke +in this presentation today. + +00:09:01.233 --> 00:09:05.799 align:middle line:79% position:50% size:60% +Well, talking about the +headings, in the past we + +00:09:05.866 --> 00:09:12.166 align:middle line:79% position:50% size:53% +have headings as +traditional H1 to H6. + +00:09:12.233 --> 00:09:14.066 align:middle line:85% position:50% size:38% +We detect that. + +00:09:14.133 --> 00:09:18.799 align:middle line:79% position:50% size:60% +Now we detect more +headings because we have + +00:09:18.866 --> 00:09:23.966 align:middle line:79% position:50% size:53% +the traditional ones +and the new ones that + +00:09:24.033 --> 00:09:29.500 align:middle line:79% position:50% size:43% +I mention here +has an ARIA-like. 
+ +00:09:29.566 --> 00:09:34.266 align:middle line:79% position:50% size:68% +We have the attribute role +heading in conjunction with + +00:09:34.333 --> 00:09:39.833 align:middle line:79% position:50% size:65% +the attribute level +with 1 to 6 and maybe more + +00:09:39.899 --> 00:09:42.466 align:middle line:85% position:50% size:65% +headings with more levels. + +00:09:42.533 --> 00:09:49.600 align:middle line:79% position:50% size:80% +So all of them at the +moment are detected by our tool. + +00:09:49.666 --> 00:09:57.366 align:middle line:79% position:50% size:65% +About the images, in the +past, we only analyzed the + +00:09:57.433 --> 00:10:00.033 align:middle line:85% position:50% size:45% +alt of the images. + +00:10:00.100 --> 00:10:05.166 align:middle line:79% position:50% size:65% +Now we have four different +attributes where we can + +00:10:05.233 --> 00:10:11.633 align:middle line:79% position:50% size:75% +put also the alternative +text, the ARIA-labelledby, the + +00:10:11.700 --> 00:10:18.333 align:middle line:79% position:50% size:73% +ARIA-label, the alt, and also +when we use the title alone. + +00:10:24.666 --> 00:10:29.299 align:middle line:79% position:50% size:65% +If you want to try all the +ACT Rules that we have at + +00:10:29.366 --> 00:10:34.833 align:middle line:79% position:50% size:73% +the moment, you can check +-- you can check the QualWeb. + +00:10:34.899 --> 00:10:47.366 align:middle line:79% position:50% size:50% +That is in +qualweb.di.fc.ul.pt. + +00:10:47.433 --> 00:10:53.733 align:middle line:85% position:50% size:58% +So qualweb.di.fc.ul.pt. + +00:10:53.799 --> 00:11:04.433 align:middle line:79% position:50% size:65% +And there you can try all +the 67 -- 67 rules that we + +00:11:04.500 --> 00:11:10.133 align:middle line:79% position:50% size:55% +have at the moment +from the branch of 70. + +00:11:10.200 --> 00:11:15.299 align:middle line:79% position:50% size:48% +So let's try a demo +of the observatory. + +00:11:15.366 --> 00:11:20.299 align:middle line:79% position:50% size:55% +And now is the moment +of David Attenborough. + +00:11:20.366 --> 00:11:24.566 align:middle line:79% position:50% size:40% +I will put +running a video. + +00:11:24.633 --> 00:11:26.433 align:middle line:85% position:50% size:28% +Let me try. + +00:11:29.933 --> 00:11:30.700 align:middle line:79% position:50% size:48% +>> JORGE FERNANDES: +Let's try a demo + +00:11:30.766 --> 00:11:32.833 align:middle line:85% position:50% size:48% +of the observatory. + +00:11:32.899 --> 00:11:35.799 align:middle line:79% position:50% size:55% +The observatory is +organized from the big + +00:11:35.866 --> 00:11:40.133 align:middle line:79% position:50% size:63% +picture until the +drill down of one entity. + +00:11:40.200 --> 00:11:43.299 align:middle line:79% position:50% size:50% +So the first page of +the observatory have + +00:11:43.366 --> 00:11:44.966 align:middle line:85% position:50% size:40% +the big picture. + +00:11:45.033 --> 00:11:48.133 align:middle line:85% position:50% size:65% +Global score from 1 to 10. + +00:11:48.200 --> 00:11:50.500 align:middle line:85% position:50% size:58% +Ten is a good practice. 
+ +00:11:50.566 --> 00:11:53.033 align:middle line:79% position:50% size:60% +Total number of +categories, total number + +00:11:53.100 --> 00:11:57.866 align:middle line:79% position:50% size:55% +of websites, pages, +a ranking table of the + +00:11:57.933 --> 00:12:03.133 align:middle line:79% position:50% size:70% +categories with the number +of pages conformed by level. + +00:12:03.200 --> 00:12:07.600 align:middle line:79% position:50% size:68% +We started by +entering the 308 Portuguese + +00:12:07.666 --> 00:12:12.533 align:middle line:79% position:50% size:63% +municipalities with the +sample home page plus all + +00:12:12.600 --> 00:12:15.700 align:middle line:79% position:50% size:43% +pages linked +to the home page. + +00:12:15.766 --> 00:12:22.200 align:middle line:79% position:50% size:78% +We have more than 50,000 +pages introduced at the moment. + +00:12:28.233 --> 00:12:31.666 align:middle line:79% position:50% size:60% +We have data graphics as +histogram of scores and + +00:12:31.733 --> 00:12:33.766 align:middle line:79% position:50% size:40% +the ten most +frequent errors. + +00:12:44.866 --> 00:12:48.133 align:middle line:79% position:50% size:65% +Let's go now to a +category, for example, the + +00:12:48.200 --> 00:12:51.366 align:middle line:79% position:50% size:38% +category of the +municipalities. + +00:12:51.433 --> 00:12:55.633 align:middle line:79% position:50% size:65% +Second level, the level of +category or directory, in + +00:12:55.700 --> 00:12:58.799 align:middle line:79% position:50% size:38% +this case, the +municipalities. + +00:12:58.866 --> 00:13:02.633 align:middle line:79% position:50% size:60% +The same organization of +the first level with the + +00:13:02.700 --> 00:13:06.700 align:middle line:79% position:50% size:65% +score, the statistical +graphics, the total number + +00:13:06.766 --> 00:13:11.899 align:middle line:79% position:50% size:65% +of websites, the +ranking table of entities. + +00:13:11.966 --> 00:13:15.500 align:middle line:79% position:50% size:55% +Let's see the +municipality of Murca. + +00:13:15.566 --> 00:13:20.433 align:middle line:79% position:50% size:65% +The average score of 9.9, +the total number of pages, + +00:13:20.500 --> 00:13:27.933 align:middle line:79% position:50% size:73% +243, three pages +conform with level A, and 226 + +00:13:28.000 --> 00:13:31.133 align:middle line:85% position:50% size:58% +pages conform with AAA. + +00:13:31.200 --> 00:13:32.533 align:middle line:85% position:50% size:40% +It's a good one. + +00:13:32.600 --> 00:13:35.733 align:middle line:79% position:50% size:55% +Let's see the Murca +municipality in detail + +00:13:35.799 --> 00:13:37.600 align:middle line:85% position:50% size:48% +in the third level. + +00:13:37.666 --> 00:13:40.833 align:middle line:79% position:50% size:53% +Again, the same +organization of data. + +00:13:40.899 --> 00:13:46.333 align:middle line:79% position:50% size:60% +Histogram, the ten most +frequent errors, the top + +00:13:46.399 --> 00:13:51.933 align:middle line:79% position:50% size:63% +five errors by conformity +level, an accessibility + +00:13:52.000 --> 00:13:54.600 align:middle line:85% position:50% size:60% +plot in a radar graphic. + +00:13:54.666 --> 00:13:57.566 align:middle line:79% position:50% size:73% +The more full the +circumference is, the better. + +00:14:01.566 --> 00:14:05.766 align:middle line:79% position:50% size:70% +And a table with +detailed error distribution. 
+ +00:14:05.833 --> 00:14:08.533 align:middle line:85% position:50% size:43% +Work in progress. + +00:14:08.600 --> 00:14:14.600 align:middle line:79% position:50% size:63% +An observatory with more +info and with a new look. + +00:14:14.666 --> 00:14:19.299 align:middle line:79% position:50% size:78% +The evolution of page +conformity on the left graphic, + +00:14:19.366 --> 00:14:23.066 align:middle line:79% position:50% size:45% +the accessibility +plot on the right. + +00:14:23.133 --> 00:14:25.600 align:middle line:85% position:50% size:60% +The histogram of scores. + +00:14:31.399 --> 00:14:35.266 align:middle line:79% position:50% size:55% +The bad practices, but +also the board of good + +00:14:35.333 --> 00:14:40.899 align:middle line:79% position:50% size:63% +practices with the better +practices by WCAG level. + +00:14:47.233 --> 00:14:50.433 align:middle line:79% position:50% size:58% +And, also, the detailed +distribution of all the + +00:14:50.500 --> 00:14:52.566 align:middle line:85% position:50% size:58% +better practices found. + +00:15:00.799 --> 00:15:03.299 align:middle line:79% position:50% size:48% +Let's try a demo +of the observatory. + +00:15:07.266 --> 00:15:09.866 align:middle line:79% position:50% size:53% +>> JORGE FERNANDES: +And now I have also a + +00:15:09.933 --> 00:15:14.299 align:middle line:79% position:50% size:58% +demonstration of our +accessibility statement + +00:15:14.366 --> 00:15:19.533 align:middle line:79% position:50% size:55% +that is made based +on -- in the WAI-Tools + +00:15:19.600 --> 00:15:22.633 align:middle line:85% position:50% size:60% +accessibility statement. + +00:15:22.700 --> 00:15:30.333 align:middle line:79% position:50% size:63% +We have a crawler to pass +through all websites of + +00:15:30.399 --> 00:15:37.299 align:middle line:79% position:50% size:75% +the public administration, +and we can collect information + +00:15:37.366 --> 00:15:43.899 align:middle line:79% position:50% size:65% +about the +conformity level of the -- + +00:15:43.966 --> 00:15:49.299 align:middle line:79% position:50% size:60% +that is mentioned in the +accessibility statement. + +00:15:49.366 --> 00:15:55.133 align:middle line:79% position:50% size:68% +And our goal is to collect +all the information because + +00:15:55.200 --> 00:15:58.033 align:middle line:79% position:50% size:50% +our accessibility +statement is machine + +00:15:58.100 --> 00:16:01.633 align:middle line:79% position:50% size:65% +readable, so it is +possible with a crawler to + +00:16:01.700 --> 00:16:07.366 align:middle line:79% position:50% size:65% +get this +information automatically. + +00:16:07.433 --> 00:16:12.266 align:middle line:79% position:50% size:60% +And I will show you also +what we can also do with + +00:16:12.333 --> 00:16:17.766 align:middle line:79% position:50% size:60% +the machine-readable +reading, for example, an + +00:16:17.833 --> 00:16:21.600 align:middle line:79% position:50% size:63% +accessibility statement +already published and use + +00:16:21.666 --> 00:16:27.633 align:middle line:79% position:50% size:60% +it to create my +accessibility statement. + +00:16:27.700 --> 00:16:30.899 align:middle line:85% position:50% size:58% +Let's see another demo. + +00:16:30.966 --> 00:16:32.733 align:middle line:79% position:50% size:60% +>> JORGE FERNANDES: +A demo of the Portuguese + +00:16:32.799 --> 00:16:35.500 align:middle line:85% position:50% size:50% +statement generator. 
+ +00:16:35.566 --> 00:16:39.399 align:middle line:79% position:50% size:73% +Technically the +Portuguese generator is based + +00:16:39.466 --> 00:16:43.166 align:middle line:85% position:50% size:68% +on the WAI-Tools generator. + +00:16:43.233 --> 00:16:50.666 align:middle line:79% position:50% size:70% +Imagine that you are +navigating in the ePortugal, + +00:16:50.733 --> 00:16:54.033 align:middle line:79% position:50% size:48% +the citizen +portal of Portugal. + +00:16:54.100 --> 00:16:59.600 align:middle line:79% position:50% size:70% +And you want to found +the accessibility statement. + +00:16:59.666 --> 00:17:04.833 align:middle line:79% position:50% size:65% +You know that if you use +the suffix "accessibility" + +00:17:04.900 --> 00:17:10.533 align:middle line:79% position:50% size:73% +in the URL, "acessibilidade" +in Portuguese, it is supposed + +00:17:10.599 --> 00:17:14.099 align:middle line:79% position:50% size:60% +to find the +accessibility statement. + +00:17:14.166 --> 00:17:20.033 align:middle line:79% position:50% size:60% +And here it is, the +accessibility statement, + +00:17:20.099 --> 00:17:24.733 align:middle line:79% position:50% size:65% +with the references of the +analysis done, following + +00:17:24.799 --> 00:17:30.900 align:middle line:79% position:50% size:78% +the model of the directive +and the Portuguese legislation. + +00:17:30.966 --> 00:17:33.833 align:middle line:79% position:50% size:55% +At the bottom of all +statements, we found a + +00:17:33.900 --> 00:17:38.933 align:middle line:79% position:50% size:60% +link to the +generator used to do it. + +00:17:39.000 --> 00:17:42.500 align:middle line:79% position:50% size:60% +My goal is to create the +accessibility statement + +00:17:42.566 --> 00:17:44.466 align:middle line:85% position:50% size:35% +to my website. + +00:17:44.533 --> 00:17:48.900 align:middle line:79% position:50% size:58% +I can use the +button "upload by URL." + +00:17:48.966 --> 00:17:53.500 align:middle line:79% position:50% size:73% +I can create my +accessibility statement based + +00:17:53.566 --> 00:17:56.366 align:middle line:85% position:50% size:60% +on the one of ePortugal. + +00:17:56.433 --> 00:18:00.500 align:middle line:79% position:50% size:63% +I enter the URL of +ePortugal statement page, + +00:18:00.566 --> 00:18:04.900 align:middle line:85% position:50% size:60% +I press OK, and is done. + +00:18:04.966 --> 00:18:08.433 align:middle line:79% position:50% size:60% +And I have the form +with all the data of the + +00:18:08.500 --> 00:18:12.333 align:middle line:79% position:50% size:60% +ePortugal +accessibility statement. + +00:18:12.400 --> 00:18:16.799 align:middle line:79% position:50% size:70% +Now I change the data +according to my organization + +00:18:16.866 --> 00:18:22.666 align:middle line:79% position:50% size:75% +and website, press the +button "Preview and Download," + +00:18:22.733 --> 00:18:26.200 align:middle line:79% position:50% size:53% +and the statement is +already with my data. + +00:18:26.266 --> 00:18:29.500 align:middle line:79% position:50% size:55% +Well, more or less, +because in this demo I + +00:18:29.566 --> 00:18:35.366 align:middle line:79% position:50% size:73% +only change one field, +the name of the organization. + +00:18:35.433 --> 00:18:38.933 align:middle line:79% position:50% size:58% +Then I press the +button "Download HTML." + +00:18:45.799 --> 00:18:48.666 align:middle line:85% position:50% size:55% +I got the HTML format. 
+ +00:18:48.733 --> 00:18:53.000 align:middle line:79% position:50% size:63% +Only the structure of the +document, not the styles. + +00:18:53.066 --> 00:18:57.566 align:middle line:79% position:50% size:60% +I copy paste this +HTML code to my website. + +00:18:57.633 --> 00:19:01.666 align:middle line:79% position:50% size:58% +And when I did that, my +accessibility statement + +00:19:01.733 --> 00:19:08.900 align:middle line:79% position:50% size:68% +got the style of my site, +and everything is in place. + +00:19:08.966 --> 00:19:12.000 align:middle line:79% position:50% size:58% +And we got another +accessibility statement + +00:19:12.066 --> 00:19:19.166 align:middle line:79% position:50% size:73% +machine readable, in this +case, based on the ePortugal. + +00:19:19.233 --> 00:19:29.099 align:middle line:79% position:50% size:55% +>> JORGE FERNANDES: +Okay, so a demo of the + +00:19:29.166 --> 00:19:32.633 align:middle line:79% position:50% size:65% +Portuguese +statement generator again. + +00:19:32.700 --> 00:19:37.033 align:middle line:79% position:50% size:58% +At the moment, we -- +with the crawler, it is + +00:19:37.099 --> 00:19:43.400 align:middle line:79% position:50% size:65% +possible from data from +today we know that we have + +00:19:43.466 --> 00:19:47.066 align:middle line:85% position:50% size:70% +32 accessibility statements. + +00:19:47.133 --> 00:19:52.333 align:middle line:79% position:50% size:63% +We do 16 compliant, eight +partially compliant, and + +00:19:52.400 --> 00:19:57.533 align:middle line:79% position:50% size:60% +eight no compliant is +some of the data that it + +00:19:57.599 --> 00:20:01.333 align:middle line:79% position:50% size:55% +is possible to get +automatically from the + +00:20:01.400 --> 00:20:05.099 align:middle line:85% position:50% size:60% +accessibility statement. + +00:20:05.166 --> 00:20:10.433 align:middle line:79% position:50% size:53% +So if you want to see +live the Portuguese + +00:20:10.500 --> 00:20:16.033 align:middle line:79% position:50% size:40% +observatory, you +can use the URL + +00:20:16.099 --> 00:20:21.166 align:middle line:79% position:50% size:70% +observatorio.acessibilidade. +gov.pt. + +00:20:21.233 --> 00:20:29.233 align:middle line:79% position:50% size:60% +So observatorio, +O-B-S-E-R-V-A-T-O-R-I-O. + +00:20:29.299 --> 00:20:34.066 align:middle line:79% position:50% size:50% +And that's it +for my presentation. + +00:20:34.133 --> 00:20:36.133 align:middle line:85% position:50% size:50% +Thank you very much. + +00:20:36.200 --> 00:20:38.333 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Thank you, Jorge. + +00:20:38.400 --> 00:20:42.000 align:middle line:79% position:50% size:68% +Yeah, so, actually, we +have a question right away. + +00:20:42.066 --> 00:20:45.000 align:middle line:85% position:50% size:45% +The question is -- + +00:20:45.066 --> 00:20:46.599 align:middle line:79% position:50% size:48% +>> JORGE FERNANDES: +Don't say. + +00:20:46.666 --> 00:20:49.266 align:middle line:85% position:50% size:10% +Yes. 
+ +00:20:49.333 --> 00:20:50.733 align:middle line:79% position:50% size:63% +>> SHADI ABOU-ZAHRA: +If we want to install the + +00:20:50.799 --> 00:20:56.733 align:middle line:79% position:50% size:65% +testing service to our own +server, do we need -- oop, + +00:20:56.799 --> 00:21:02.833 align:middle line:79% position:50% size:65% +and I moved -- do we need +the QualWeb core in GitHub + +00:21:02.900 --> 00:21:08.166 align:middle line:79% position:50% size:78% +only, or what is the +purpose of the monitor-service, + +00:21:08.233 --> 00:21:13.033 align:middle line:79% position:50% size:50% +et cetera, repos +in ama.gov.pt repos? + +00:21:13.099 --> 00:21:15.500 align:middle line:79% position:50% size:53% +So this is a bit of +a technical question. + +00:21:15.566 --> 00:21:17.433 align:middle line:79% position:50% size:63% +>> JORGE FERNANDES: Yeah, +maybe Carlos can help me. + +00:21:17.500 --> 00:21:19.500 align:middle line:79% position:50% size:78% +>> SHADI ABOU-ZAHRA: +About the installation of this. + +00:21:19.566 --> 00:21:21.166 align:middle line:79% position:50% size:45% +Somebody wants +to use it locally. + +00:21:21.233 --> 00:21:22.233 align:middle line:85% position:50% size:18% +Carlos? + +00:21:22.299 --> 00:21:23.633 align:middle line:79% position:50% size:43% +>> CARLOS DUARTE: +Yeah, sure. + +00:21:23.700 --> 00:21:25.433 align:middle line:85% position:50% size:58% +I can help, definitely. + +00:21:25.500 --> 00:21:32.966 align:middle line:79% position:50% size:68% +You don't need to install +any QualWeb-related service + +00:21:33.033 --> 00:21:36.733 align:middle line:79% position:50% size:58% +to have the observatory +and the tools in the + +00:21:36.799 --> 00:21:39.933 align:middle line:79% position:50% size:45% +observatory +ecosystem working. + +00:21:40.000 --> 00:21:45.733 align:middle line:79% position:50% size:58% +The monitor server that +you mentioned in that + +00:21:45.799 --> 00:21:49.933 align:middle line:79% position:50% size:43% +question includes +the QualWeb core. + +00:21:50.000 --> 00:21:54.000 align:middle line:79% position:50% size:63% +So everything that +requires an accessibility + +00:21:54.066 --> 00:21:57.166 align:middle line:79% position:50% size:53% +evaluation is handled +through the monitor + +00:21:57.233 --> 00:22:01.900 align:middle line:79% position:50% size:58% +server, which is also +tasked with translating + +00:22:01.966 --> 00:22:07.500 align:middle line:79% position:50% size:63% +the outcomes of QualWeb +into the formats that the + +00:22:07.566 --> 00:22:11.833 align:middle line:79% position:50% size:65% +tools in the PT +observatory ecosystem use. + +00:22:11.900 --> 00:22:16.133 align:middle line:79% position:50% size:65% +So everything that you +need to have your own copy + +00:22:16.200 --> 00:22:19.299 align:middle line:79% position:50% size:53% +of this ecosystem +is available from the + +00:22:19.366 --> 00:22:21.966 align:middle line:85% position:50% size:55% +AMA GitHub repository. + +00:22:24.700 --> 00:22:25.866 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Right. + +00:22:25.933 --> 00:22:28.266 align:middle line:79% position:50% size:65% +So that's one particular +aspect is that this entire + +00:22:28.333 --> 00:22:31.033 align:middle line:79% position:50% size:63% +observatory and all the +tools are completely open + +00:22:31.099 --> 00:22:35.700 align:middle line:79% position:50% size:75% +source, and I'm sure they +welcome contributions as well. 
+ +00:22:35.766 --> 00:22:40.533 align:middle line:79% position:50% size:53% +Now, Jorge, another question was: I don't + +00:22:40.599 --> 00:22:43.933 align:middle line:79% position:50% size:58% +understand why a new accessibility statement + +00:22:44.000 --> 00:22:47.599 align:middle line:79% position:50% size:60% +can be made by copying a published -- an existing + +00:22:47.666 --> 00:22:49.266 align:middle line:85% position:50% size:60% +accessibility statement. + +00:22:49.333 --> 00:22:51.233 align:middle line:85% position:50% size:58% +Can you please clarify? + +00:22:51.299 --> 00:22:53.633 align:middle line:79% position:50% size:65% +>> JORGE FERNANDES: +Yeah, you know, one of the + +00:22:53.700 --> 00:22:59.200 align:middle line:79% position:50% size:58% +things in the -- when you need to fulfill the + +00:22:59.266 --> 00:23:04.933 align:middle line:79% position:50% size:68% +accessibility statement +is to understand what is in + +00:23:05.000 --> 00:23:08.566 align:middle line:79% position:50% size:43% +different parts +of the statement. + +00:23:08.633 --> 00:23:17.333 align:middle line:79% position:50% size:68% +So when you can visualize +a final declaration already + +00:23:17.400 --> 00:23:23.633 align:middle line:79% position:50% size:55% +fulfilled, we know +that this help also to + +00:23:23.700 --> 00:23:29.233 align:middle line:79% position:50% size:50% +adapt and to fulfill +our own declaration. + +00:23:29.299 --> 00:23:35.466 align:middle line:79% position:50% size:65% +So it's something that it +is helpful to do that, and + +00:23:35.533 --> 00:23:39.433 align:middle line:79% position:50% size:50% +it is also when +you already have the + +00:23:39.500 --> 00:23:43.400 align:middle line:79% position:50% size:55% +declaration in your +website and want to do + +00:23:43.466 --> 00:23:49.500 align:middle line:79% position:50% size:70% +changes, you need -- you +also can use the same method + +00:23:49.566 --> 00:23:54.266 align:middle line:79% position:50% size:60% +that I used here in +the demo to make changes + +00:23:54.333 --> 00:23:58.700 align:middle line:85% position:50% size:60% +of your own declaration. + +00:23:58.766 --> 00:24:03.266 align:middle line:79% position:50% size:55% +So it is a question +of it's more easier to + +00:24:03.333 --> 00:24:11.500 align:middle line:79% position:50% size:63% +understand how to fulfill +the new declaration and + +00:24:11.566 --> 00:24:17.766 align:middle line:79% position:50% size:50% +also help to modify +our own declaration. diff --git a/content-images/about/wai-tools/video4.vtt b/content-images/about/wai-tools/video4.vtt new file mode 100644 index 00000000000..758d614df97 --- /dev/null +++ b/content-images/about/wai-tools/video4.vtt @@ -0,0 +1,1963 @@ +WEBVTT + +00:00:00.400 --> 00:00:02.833 align:middle line:79% position:50% size:70% +>> SHADI ABOU-ZAHRA: For the +reporting support tools, I + +00:00:02.899 --> 00:00:05.133 align:middle line:79% position:50% size:55% +would like to call +Eric Velleman from the + +00:00:05.200 --> 00:00:08.566 align:middle line:79% position:50% size:68% +Accessibility Foundation to +come to the virtual stage. + +00:00:08.633 --> 00:00:12.166 align:middle line:79% position:50% size:55% +Eric, please introduce +yourself and take it + +00:00:12.233 --> 00:00:14.733 align:middle line:85% position:50% size:38% +away from here. + +00:00:14.800 --> 00:00:16.199 align:middle line:79% position:50% size:58% +>> ERIC VELLEMAN: Okay. +So, hi.
+ +00:00:16.266 --> 00:00:18.266 align:middle line:85% position:50% size:45% +I'm Eric Velleman. + +00:00:18.333 --> 00:00:25.000 align:middle line:79% position:50% size:63% +And let me first start up +here on my desktop here. + +00:00:25.066 --> 00:00:27.566 align:middle line:85% position:50% size:68% +So I hope you all see this. + +00:00:27.633 --> 00:00:32.633 align:middle line:85% position:50% size:45% +That should be it. + +00:00:35.766 --> 00:00:36.666 align:middle line:85% position:50% size:53% +So I'm Eric Velleman. + +00:00:36.733 --> 00:00:40.033 align:middle line:79% position:50% size:70% +I'm from the -- I work at +the Accessibility Foundation + +00:00:40.100 --> 00:00:41.766 align:middle line:85% position:50% size:48% +in the Netherlands. + +00:00:41.833 --> 00:00:46.433 align:middle line:79% position:50% size:50% +We worked on the ACT +Rules and on the + +00:00:46.500 --> 00:00:48.233 align:middle line:85% position:50% size:60% +reporting support tools. + +00:00:48.299 --> 00:00:50.399 align:middle line:79% position:50% size:53% +So I'll be presenting +the two tools that we + +00:00:50.466 --> 00:00:53.633 align:middle line:85% position:50% size:48% +worked on recently. + +00:00:53.700 --> 00:00:56.600 align:middle line:79% position:50% size:53% +Both tools are +undergoing an update. + +00:00:56.666 --> 00:00:59.700 align:middle line:79% position:50% size:73% +And in this short -- the two +short presentations that will + +00:00:59.766 --> 00:01:03.700 align:middle line:79% position:50% size:63% +follow now, I'll show you +what the updated tools + +00:01:03.766 --> 00:01:05.200 align:middle line:85% position:50% size:38% +will look like. + +00:01:05.266 --> 00:01:08.266 align:middle line:79% position:50% size:65% +And probably -- and I hope +you can see them soon + +00:01:08.333 --> 00:01:12.400 align:middle line:85% position:50% size:48% +on the W3C website. + +00:01:12.466 --> 00:01:13.700 align:middle line:85% position:50% size:13% +Okay. + +00:01:13.766 --> 00:01:19.166 align:middle line:79% position:50% size:50% +So first the +WCAG-EM Report Tool. + +00:01:19.233 --> 00:01:23.033 align:middle line:79% position:50% size:60% +So it used to be the -- +does everybody see this? + +00:01:26.166 --> 00:01:28.400 align:middle line:79% position:50% size:65% +>> SHADI ABOU-ZAHRA: Yeah, +we see your screen, Eric. + +00:01:28.466 --> 00:01:29.700 align:middle line:79% position:50% size:43% +>> ERIC VELLEMAN: +Okay. Good. + +00:01:29.766 --> 00:01:33.766 align:middle line:79% position:50% size:68% +So it used to be this left +thing here, which is if you + +00:01:33.833 --> 00:01:37.033 align:middle line:79% position:50% size:70% +still go to the website, you +will still see this one. + +00:01:37.099 --> 00:01:40.299 align:middle line:79% position:50% size:65% +And in the meantime, we've +been working on this one, + +00:01:40.366 --> 00:01:41.466 align:middle line:85% position:50% size:53% +the one on the right. + +00:01:41.533 --> 00:01:43.099 align:middle line:85% position:50% size:70% +So it's the updated version. + +00:01:43.166 --> 00:01:49.233 align:middle line:79% position:50% size:73% +And to get an overview, it +can be found at w3.org/WAI -- + +00:01:49.299 --> 00:01:55.133 align:middle line:79% position:50% size:73% +W-A-I -- /eval/reports -- how +do you call that small line + +00:01:55.200 --> 00:01:59.000 align:middle line:85% position:50% size:55% +tool -- forward slash. 
+ +00:01:59.066 --> 00:02:05.599 align:middle line:79% position:50% size:70% +And what we did is we follow +the procedure to evaluate + +00:02:05.666 --> 00:02:08.233 align:middle line:79% position:50% size:38% +websites in the +reporting tool. + +00:02:08.300 --> 00:02:11.866 align:middle line:79% position:50% size:70% +It helps you generate a +report according to WCAG-EM. + +00:02:11.933 --> 00:02:14.199 align:middle line:79% position:50% size:43% +It supports and +links to relevant + +00:02:14.266 --> 00:02:16.266 align:middle line:85% position:50% size:55% +information and tools. + +00:02:16.333 --> 00:02:18.933 align:middle line:79% position:50% size:58% +It aligns with the WCAG +conformant claims. + +00:02:19.000 --> 00:02:22.199 align:middle line:79% position:50% size:50% +And it supports WCAG +2.0 and WCAG 2.1. + +00:02:22.266 --> 00:02:27.199 align:middle line:85% position:50% size:75% +It doesn't support WCAG 3 yet. + +00:02:27.266 --> 00:02:30.566 align:middle line:79% position:50% size:68% +And it can also show the +difference between the two. + +00:02:30.633 --> 00:02:34.000 align:middle line:79% position:50% size:63% +So if you had a previous +one in WCAG -- a previous + +00:02:34.066 --> 00:02:41.599 align:middle line:79% position:50% size:75% +evaluation in WCAG 2.0, you +can just add -- just only show + +00:02:41.666 --> 00:02:44.866 align:middle line:85% position:50% size:75% +the WCAG 2.1 success criteria. + +00:02:44.933 --> 00:02:47.900 align:middle line:79% position:50% size:65% +It is a manual tool, so it +doesn't do any automated + +00:02:47.966 --> 00:02:51.666 align:middle line:79% position:50% size:73% +checks by itself, but it does +have the possibility, the new + +00:02:51.733 --> 00:02:54.466 align:middle line:79% position:50% size:45% +version, to import +automated checks. + +00:02:54.533 --> 00:03:00.866 align:middle line:79% position:50% size:70% +So if you use Axe or Alfa by +Siteimprove, you can import + +00:03:00.933 --> 00:03:05.733 align:middle line:79% position:50% size:63% +the results as long as +they comply with the JSON + +00:03:05.800 --> 00:03:07.833 align:middle line:85% position:50% size:60% +standard for the import. + +00:03:07.900 --> 00:03:10.566 align:middle line:79% position:50% size:75% +It has the possibility to open +and save input, so you can + +00:03:10.633 --> 00:03:11.566 align:middle line:85% position:50% size:43% +exchange results. + +00:03:11.633 --> 00:03:14.599 align:middle line:79% position:50% size:68% +So at the end of an +evaluation, you can sort of + +00:03:14.666 --> 00:03:18.599 align:middle line:79% position:50% size:73% +save your evaluation, send it +to a friend, and the friend + +00:03:18.666 --> 00:03:21.699 align:middle line:79% position:50% size:73% +can import the evaluation and +continue with it or check it + +00:03:21.766 --> 00:03:26.599 align:middle line:79% position:50% size:75% +and send it back to you or -- +well, in this way you can work + +00:03:26.666 --> 00:03:31.866 align:middle line:79% position:50% size:65% +with multiple people on an +evaluation as long as you + +00:03:31.933 --> 00:03:34.766 align:middle line:79% position:50% size:43% +don't work on it +at the same time. + +00:03:34.833 --> 00:03:37.300 align:middle line:79% position:50% size:68% +It supports and links to +relevant information tools. + +00:03:37.366 --> 00:03:39.099 align:middle line:79% position:50% size:55% +I think I said that +in the previous slide. 
+ +00:03:39.166 --> 00:03:41.633 align:middle line:85% position:50% size:75% +It's an easy translation file. + +00:03:41.699 --> 00:03:46.733 align:middle line:79% position:50% size:73% +So it used to be rather +complex to translate, but now + +00:03:46.800 --> 00:03:50.433 align:middle line:79% position:50% size:55% +it's -- it has an easy +translation file, so + +00:03:50.500 --> 00:03:51.599 align:middle line:85% position:50% size:48% +that makes it easy. + +00:03:51.666 --> 00:03:52.800 align:middle line:85% position:50% size:63% +There is an English file. + +00:03:52.866 --> 00:03:55.833 align:middle line:79% position:50% size:68% +If you translate everything +that is in English to your + +00:03:55.900 --> 00:03:59.033 align:middle line:79% position:50% size:73% +language, you will have your +own language file or your own + +00:03:59.099 --> 00:04:02.533 align:middle line:79% position:50% size:48% +language version of +the reporting tool. + +00:04:02.599 --> 00:04:05.699 align:middle line:79% position:50% size:70% +It has changed the new WAI +design and framework, and it + +00:04:05.766 --> 00:04:08.166 align:middle line:79% position:50% size:63% +has the possibility of +importing data from tools + +00:04:08.233 --> 00:04:11.199 align:middle line:85% position:50% size:55% +using the EARL format. + +00:04:11.266 --> 00:04:17.899 align:middle line:79% position:50% size:45% +So now let's shift +to the live demo. + +00:04:17.966 --> 00:04:20.733 align:middle line:85% position:50% size:55% +It should be this one. + +00:04:20.800 --> 00:04:21.766 align:middle line:85% position:50% size:13% +Yeah. + +00:04:21.833 --> 00:04:23.899 align:middle line:79% position:50% size:68% +So this is the live demo of +the tool that is currently + +00:04:23.966 --> 00:04:27.166 align:middle line:85% position:50% size:40% +still on GitHub. + +00:04:27.233 --> 00:04:30.699 align:middle line:79% position:50% size:75% +And we are working on it, so +you will see that you have the + +00:04:30.766 --> 00:04:32.566 align:middle line:85% position:50% size:58% +same walk-through here. + +00:04:32.633 --> 00:04:36.433 align:middle line:79% position:50% size:58% +So the Overview, Scope, +Explore, Sample, Audit, + +00:04:36.500 --> 00:04:40.266 align:middle line:79% position:50% size:60% +Summary, and View Report +are the menu items. + +00:04:40.333 --> 00:04:43.266 align:middle line:79% position:50% size:48% +And it explains you +how the tool works. + +00:04:43.333 --> 00:04:45.633 align:middle line:85% position:50% size:68% +It gives tips for using it. + +00:04:45.699 --> 00:04:49.699 align:middle line:79% position:50% size:73% +If you go to it, at the right +there's sort of a menu bar. + +00:04:49.766 --> 00:04:53.566 align:middle line:79% position:50% size:70% +You can view the reports, +start a new evaluation, open + +00:04:53.633 --> 00:04:57.000 align:middle line:79% position:50% size:63% +an evaluation from a JSON +file, or import data from + +00:04:57.066 --> 00:05:01.366 align:middle line:85% position:50% size:68% +automated evaluation tools. + +00:05:01.433 --> 00:05:05.199 align:middle line:79% position:50% size:75% +So to start a new evaluation, +you just click the button, and + +00:05:05.266 --> 00:05:07.333 align:middle line:79% position:50% size:45% +then you go to the +first page, Scope. 
+ +00:05:07.399 --> 00:05:10.266 align:middle line:79% position:50% size:73% +Well, here you can -- this is +not really different from the + +00:05:10.333 --> 00:05:16.033 align:middle line:79% position:50% size:73% +previous version, only that +it also has a WCAG 2.1 and it + +00:05:16.100 --> 00:05:19.399 align:middle line:85% position:50% size:48% +works a bit better. + +00:05:19.466 --> 00:05:23.766 align:middle line:79% position:50% size:75% +It has all the parts that were +in the previous tools except, + +00:05:23.833 --> 00:05:26.166 align:middle line:79% position:50% size:48% +of course, the nice +part is this one. + +00:05:26.233 --> 00:05:28.399 align:middle line:79% position:50% size:55% +I should show you the +translation file here. + +00:05:28.466 --> 00:05:30.699 align:middle line:85% position:50% size:73% +So I should push Netherlands. + +00:05:30.766 --> 00:05:34.366 align:middle line:79% position:50% size:75% +You'll get the Netherlands +version of the tool, which is, + +00:05:34.433 --> 00:05:36.833 align:middle line:85% position:50% size:43% +of course, great. + +00:05:36.899 --> 00:05:39.899 align:middle line:79% position:50% size:73% +If you want your language +there, you just translate the + +00:05:39.966 --> 00:05:45.000 align:middle line:79% position:50% size:75% +file, and ask somebody at W3C +to put it in the right folder. + +00:05:45.066 --> 00:05:49.433 align:middle line:79% position:50% size:73% +And then all of a sudden, you +will have your own language + +00:05:49.500 --> 00:05:54.000 align:middle line:79% position:50% size:70% +web accessibility evaluation +report tool generator. + +00:05:54.066 --> 00:05:57.600 align:middle line:85% position:50% size:43% +Here, the sample. + +00:05:57.666 --> 00:06:01.633 align:middle line:79% position:50% size:70% +You can audit the selected +sample, which is, like, here + +00:06:01.699 --> 00:06:04.500 align:middle line:85% position:50% size:43% +with perceivable. + +00:06:04.566 --> 00:06:09.333 align:middle line:79% position:50% size:70% +You can sort of declare +something as passed, failed, + +00:06:09.399 --> 00:06:13.966 align:middle line:79% position:50% size:70% +cannot tell, not present, or +not checked, et cetera, for + +00:06:14.033 --> 00:06:17.233 align:middle line:79% position:50% size:53% +all the guidelines +and success criteria. + +00:06:17.300 --> 00:06:21.399 align:middle line:79% position:50% size:73% +You can add information for a +summary, like the name of the + +00:06:21.466 --> 00:06:24.800 align:middle line:79% position:50% size:63% +evaluator, the evaluation +commissioner, the date, + +00:06:24.866 --> 00:06:27.266 align:middle line:85% position:50% size:45% +executive summary. + +00:06:27.333 --> 00:06:30.399 align:middle line:79% position:50% size:65% +And then, finally, you can +sort of view the report. + +00:06:30.466 --> 00:06:32.100 align:middle line:85% position:50% size:70% +Multiple buttons to do that. + +00:06:32.166 --> 00:06:34.833 align:middle line:79% position:50% size:70% +And it will generate the +complete report with all the + +00:06:34.899 --> 00:06:38.600 align:middle line:79% position:50% size:65% +things that you just input +in the previous pages. + +00:06:38.666 --> 00:06:40.600 align:middle line:79% position:50% size:58% +And on this page is the +interesting button. + +00:06:40.666 --> 00:06:44.100 align:middle line:79% position:50% size:50% +Save the evaluation +data as a JSON file. 
+
+00:06:44.166 --> 00:06:49.000 align:middle line:79% position:50% size:75%
+So if you click it, you'll see
+here at the bottom it saves
+
+00:06:49.066 --> 00:06:51.433 align:middle line:85% position:50% size:48%
+an evaluation.json.
+
+00:06:51.500 --> 00:06:53.966 align:middle line:79% position:50% size:68%
+And if you go to the first
+page, you can then open the
+
+00:06:54.033 --> 00:06:56.133 align:middle line:85% position:50% size:53%
+evaluation from JSON.
+
+00:06:56.199 --> 00:06:58.766 align:middle line:79% position:50% size:70%
+I won't do that here because
+you'll see all the documents
+
+00:06:58.833 --> 00:07:03.000 align:middle line:79% position:50% size:48%
+on my computer, but
+trust me, it works.
+
+00:07:03.066 --> 00:07:05.366 align:middle line:79% position:50% size:50%
+That's the website
+accessibility report
+
+00:07:05.433 --> 00:07:09.566 align:middle line:85% position:50% size:38%
+generator live.
+
+00:07:09.633 --> 00:07:14.233 align:middle line:79% position:50% size:45%
+But, once again,
+great translation.
+
+00:07:14.300 --> 00:07:15.566 align:middle line:79% position:50% size:60%
+>> SHADI ABOU-ZAHRA:
+So you just changed from
+
+00:07:15.633 --> 00:07:16.633 align:middle line:85% position:50% size:43%
+English to Dutch.
+
+00:07:16.699 --> 00:07:18.699 align:middle line:85% position:50% size:40%
+Thank you, Eric.
+
+00:07:18.766 --> 00:07:23.033 align:middle line:79% position:50% size:73%
+So there are already a couple
+of questions and just some
+
+00:07:23.100 --> 00:07:27.699 align:middle line:85% position:50% size:75%
+additional visual description.
+
+00:07:27.766 --> 00:07:30.533 align:middle line:79% position:50% size:55%
+So this is not the
+final design of how it
+
+00:07:30.600 --> 00:07:32.399 align:middle line:85% position:50% size:38%
+will look like.
+
+00:07:32.466 --> 00:07:37.366 align:middle line:79% position:50% size:65%
+It's the kind of tool
+Eric had read out the URL.
+
+00:07:37.433 --> 00:07:41.433 align:middle line:79% position:50% size:75%
+And you can find that URL also
+from the project page and from
+
+00:07:41.500 --> 00:07:44.500 align:middle line:85% position:50% size:70%
+the agenda, the report tool.
+
+00:07:44.566 --> 00:07:48.800 align:middle line:79% position:50% size:70%
+We're updating it primarily
+to support translations more
+
+00:07:48.866 --> 00:07:53.133 align:middle line:79% position:50% size:60%
+easily, but also to put
+it in the new WAI style.
+
+00:07:53.199 --> 00:07:56.300 align:middle line:79% position:50% size:73%
+Many things here is not going
+to be the final way it looks.
+
+00:07:56.366 --> 00:07:58.933 align:middle line:85% position:50% size:75%
+It's still a work in progress.
+
+00:07:59.000 --> 00:08:07.633 align:middle line:85% position:50% size:53%
+And so, yeah -- yeah.
+
+00:08:07.699 --> 00:08:14.033 align:middle line:79% position:50% size:73%
+There's several questions,
+Eric, that -- if you can stop
+
+00:08:14.100 --> 00:08:16.733 align:middle line:79% position:50% size:53%
+moving around because
+it's distracting me.
+
+00:08:18.033 --> 00:08:20.800 align:middle line:85% position:50% size:60%
+>> ERIC VELLEMAN: Sorry.
+
+00:08:20.866 --> 00:08:23.000 align:middle line:79% position:50% size:68%
+>> SHADI ABOU-ZAHRA: Moving
+content is distracting.
+
+00:08:23.066 --> 00:08:24.533 align:middle line:85% position:50% size:70%
+>> ERIC VELLEMAN: Oh, sorry.
+
+00:08:24.600 --> 00:08:25.466 align:middle line:85% position:50% size:68%
+>> SHADI ABOU-ZAHRA: Yeah.
+ +00:08:25.533 --> 00:08:28.300 align:middle line:79% position:50% size:68% +So can you clarify the +difference between open and + +00:08:28.366 --> 00:08:29.899 align:middle line:85% position:50% size:38% +importing data? + +00:08:29.966 --> 00:08:36.299 align:middle line:79% position:50% size:65% +>> ERIC VELLEMAN: Yeah. +So open is to open a file. + +00:08:36.366 --> 00:08:40.299 align:middle line:79% position:50% size:73% +So I just showed you at the +end of the evaluation you can + +00:08:40.366 --> 00:08:48.299 align:middle line:79% position:50% size:68% +save your evaluation report +in the form of a JSON file. + +00:08:48.366 --> 00:08:50.466 align:middle line:79% position:50% size:53% +And somebody else +can open your report. + +00:08:50.533 --> 00:08:54.000 align:middle line:79% position:50% size:45% +So an import, that +is to import data + +00:08:54.066 --> 00:08:55.233 align:middle line:85% position:50% size:53% +from automated tools. + +00:08:55.299 --> 00:08:58.533 align:middle line:85% position:50% size:65% +So like Axe, for instance. + +00:08:58.600 --> 00:09:05.333 align:middle line:79% position:50% size:70% +So you can use a tool to +evaluate a website, and then + +00:09:05.399 --> 00:09:10.399 align:middle line:79% position:50% size:60% +the evaluation results +can be imported into the + +00:09:10.466 --> 00:09:13.566 align:middle line:85% position:50% size:50% +WCAG-EM Report Tool. + +00:09:13.633 --> 00:09:18.066 align:middle line:79% position:50% size:73% +The small angle there is that +the only thing that will be + +00:09:18.133 --> 00:09:24.100 align:middle line:79% position:50% size:70% +imported currently are the +URLs that are in the sample. + +00:09:24.166 --> 00:09:27.100 align:middle line:79% position:50% size:75% +So it will choose from all the +URLs that you have checked + +00:09:27.166 --> 00:09:28.333 align:middle line:85% position:50% size:63% +with the automated tools. + +00:09:28.399 --> 00:09:32.466 align:middle line:79% position:50% size:73% +It will only use the results +of the pages that are in your + +00:09:32.533 --> 00:09:36.899 align:middle line:85% position:50% size:53% +sample at the moment. + +00:09:36.966 --> 00:09:38.266 align:middle line:79% position:50% size:53% +>> SHADI ABOU-ZAHRA: +Excellent. Thank you. + +00:09:38.333 --> 00:09:42.466 align:middle line:79% position:50% size:70% +And then some more technical +questions regarding the + +00:09:42.533 --> 00:09:45.166 align:middle line:85% position:50% size:40% +underlying data. + +00:09:45.233 --> 00:09:46.433 align:middle line:85% position:50% size:55% +Somebody is concerned. + +00:09:46.500 --> 00:09:50.000 align:middle line:79% position:50% size:63% +The underlying JSON data, +will it stay the same + +00:09:50.066 --> 00:09:52.066 align:middle line:85% position:50% size:48% +in the new version? + +00:09:53.066 --> 00:09:55.299 align:middle line:79% position:50% size:58% +>> ERIC VELLEMAN: +Yeah, as far as I know. + +00:09:55.366 --> 00:09:57.399 align:middle line:79% position:50% size:43% +You know, I'm not +sure about that. + +00:09:57.466 --> 00:10:00.666 align:middle line:79% position:50% size:70% +So you mean if you have JSON +data from the previous tool? + +00:10:00.733 --> 00:10:02.399 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Yeah. No. + +00:10:02.466 --> 00:10:06.899 align:middle line:79% position:50% size:58% +The reports you have +so far will stay valid. 
+ +00:10:06.966 --> 00:10:10.500 align:middle line:79% position:50% size:70% +So, yeah, so the +functionality, the importing + +00:10:10.566 --> 00:10:14.899 align:middle line:79% position:50% size:75% +and the opening and closing +will be the same, and the JSON + +00:10:14.966 --> 00:10:18.866 align:middle line:85% position:50% size:73% +format will remain unchanged. + +00:10:18.933 --> 00:10:20.333 align:middle line:85% position:50% size:13% +Okay. + +00:10:20.399 --> 00:10:25.366 align:middle line:79% position:50% size:65% +Another question that came +also in the -- similar + +00:10:25.433 --> 00:10:28.433 align:middle line:79% position:50% size:43% +question was on +the registration. + +00:10:28.500 --> 00:10:33.799 align:middle line:79% position:50% size:75% +Can you export -- so we can +now export an HTML and in JSON + +00:10:33.866 --> 00:10:36.700 align:middle line:85% position:50% size:70% +format for machine readable. + +00:10:36.766 --> 00:10:39.933 align:middle line:79% position:50% size:68% +Are there other formats, +for example, pdf or Word or + +00:10:40.000 --> 00:10:42.000 align:middle line:85% position:50% size:48% +Excel or something? + +00:10:44.333 --> 00:10:45.566 align:middle line:85% position:50% size:63% +>> ERIC VELLEMAN: No. No. + +00:10:45.633 --> 00:10:47.766 align:middle line:79% position:50% size:48% +Just HTML as far +as I know and JSON. + +00:10:47.833 --> 00:10:48.966 align:middle line:85% position:50% size:68% +>> SHADI ABOU-ZAHRA: Right. + +00:10:49.033 --> 00:10:52.733 align:middle line:79% position:50% size:75% +We do have GitHub where you +can put wish list requirements + +00:10:52.799 --> 00:10:53.966 align:middle line:85% position:50% size:58% +or you can open issues. + +00:10:54.033 --> 00:10:59.000 align:middle line:79% position:50% size:75% +Also, if you find bugs, please +do report these, and we will + +00:10:59.066 --> 00:11:03.533 align:middle line:79% position:50% size:63% +-- yeah, we will see what +we can do in the future. + +00:11:03.600 --> 00:11:08.200 align:middle line:79% position:50% size:70% +But right now, yeah, we do +not export to other formats. + +00:11:08.266 --> 00:11:10.833 align:middle line:79% position:50% size:73% +There was a question: Can you +show an example of importing + +00:11:10.899 --> 00:11:12.600 align:middle line:85% position:50% size:48% +an Axe EARL report? + +00:11:12.666 --> 00:11:14.600 align:middle line:79% position:50% size:68% +So this was about reporting +from -- importing from + +00:11:14.666 --> 00:11:17.933 align:middle line:85% position:50% size:40% +automated tools. + +00:11:18.000 --> 00:11:25.333 align:middle line:79% position:50% size:65% +So on -- I think you said +you don't want to show the + +00:11:25.399 --> 00:11:28.966 align:middle line:79% position:50% size:58% +files on your computer, +which is fair enough. + +00:11:29.033 --> 00:11:34.899 align:middle line:79% position:50% size:73% +I think the best we can do is +there's -- the EARL GitHub -- + +00:11:34.966 --> 00:11:38.733 align:middle line:79% position:50% size:63% +and I'll give the URL +later to the EARL GitHub. + +00:11:38.799 --> 00:11:41.766 align:middle line:79% position:50% size:68% +I think it should be linked +from the WAI-Tools Project + +00:11:41.833 --> 00:11:46.633 align:middle line:79% position:50% size:68% +page as well, and there -- +there should be some sample + +00:11:46.700 --> 00:11:51.733 align:middle line:79% position:50% size:55% +reports there that you +can use from there. 
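Since the sample EARL reports themselves are not shown during the session, here is a minimal, hypothetical sketch of the idea behind the import: an automated checker's output is a list of assertions about specific pages, and, as Eric explained, only assertions about pages in the WCAG-EM sample are used. The type and field names below are simplified placeholders, not the Report Tool's actual import schema.

```typescript
// Simplified, hypothetical shape of one EARL-style assertion exported by an
// automated checker such as Axe or Alfa (the real EARL vocabulary is richer).
interface Assertion {
  subject: { source: string };                 // URL of the page that was tested
  test: string;                                // identifier of the rule or check
  result: { outcome: "passed" | "failed" | "cantTell" | "inapplicable" };
}

// Keep only the assertions about pages that are in the WCAG-EM sample,
// mirroring the behaviour Eric describes for the current import.
function filterToSample(assertions: Assertion[], samplePages: string[]): Assertion[] {
  const sample = new Set(samplePages);
  return assertions.filter((a) => sample.has(a.subject.source));
}

// Example: only results for the sampled page are kept.
const imported = filterToSample(
  [
    { subject: { source: "https://example.org/" }, test: "image-alt", result: { outcome: "failed" } },
    { subject: { source: "https://example.org/news" }, test: "image-alt", result: { outcome: "passed" } },
  ],
  ["https://example.org/"]
);
console.log(imported.length); // 1
```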
+ +00:11:51.799 --> 00:11:53.366 align:middle line:85% position:50% size:13% +Okay. + +00:11:53.433 --> 00:11:56.000 align:middle line:79% position:50% size:55% +Eric, does the report +generator evaluate web + +00:11:56.066 --> 00:11:58.066 align:middle line:85% position:50% size:38% +or mobile apps? + +00:11:58.966 --> 00:12:00.366 align:middle line:85% position:50% size:55% +>> ERIC VELLEMAN: Web. + +00:12:03.399 --> 00:12:06.833 align:middle line:79% position:50% size:73% +>> SHADI ABOU-ZAHRA: And it's +WCAG, so it's independent. + +00:12:06.899 --> 00:12:09.866 align:middle line:79% position:50% size:68% +So, actually, the important +thing is this is not an + +00:12:09.933 --> 00:12:14.466 align:middle line:79% position:50% size:75% +automated tool, so it actually +uses all the criteria, so you + +00:12:14.533 --> 00:12:20.866 align:middle line:79% position:50% size:70% +can use it just as well to +create reports for the apps. + +00:12:20.933 --> 00:12:22.333 align:middle line:79% position:50% size:70% +>> ERIC VELLEMAN: At +Accessibility Foundation, we + +00:12:22.399 --> 00:12:27.733 align:middle line:79% position:50% size:43% +use the same tool +to evaluate apps. + +00:12:27.799 --> 00:12:29.133 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Right. Good. + +00:12:29.200 --> 00:12:30.200 align:middle line:85% position:50% size:33% +Good to hear. + +00:12:30.266 --> 00:12:32.266 align:middle line:79% position:50% size:65% +Wow, there's a whole bunch +of questions coming in. + +00:12:32.333 --> 00:12:35.233 align:middle line:85% position:50% size:48% +We want to move on. + +00:12:35.299 --> 00:12:39.466 align:middle line:79% position:50% size:53% +When this new version +will be released? + +00:12:39.533 --> 00:12:41.633 align:middle line:85% position:50% size:75% +That's a really good question. + +00:12:41.700 --> 00:12:43.033 align:middle line:85% position:50% size:13% +Yeah. + +00:12:43.100 --> 00:12:45.533 align:middle line:79% position:50% size:48% +We hope in the +coming week or two. + +00:12:45.600 --> 00:12:47.200 align:middle line:85% position:50% size:45% +So please hang on. + +00:12:47.266 --> 00:12:49.700 align:middle line:79% position:50% size:53% +We'll let you know as +quickly as possible. + +00:12:49.766 --> 00:12:51.966 align:middle line:79% position:50% size:75% +At the end of the presentation +today, at the end of the + +00:12:52.033 --> 00:12:54.633 align:middle line:79% position:50% size:68% +session, I'll be giving you +information on how you can + +00:12:54.700 --> 00:12:59.500 align:middle line:79% position:50% size:73% +receive updates, how you can +be notified, and so we'll let + +00:12:59.566 --> 00:13:03.466 align:middle line:79% position:50% size:63% +you know as soon as the +new version is available. + +00:13:03.533 --> 00:13:06.600 align:middle line:79% position:50% size:70% +Another question: Austria +and Spain uses the tool from + +00:13:06.666 --> 00:13:08.966 align:middle line:79% position:50% size:43% +monitoring bodies +and make changes. + +00:13:09.033 --> 00:13:11.433 align:middle line:79% position:50% size:68% +Is there any exchange +between your organizations? + +00:13:11.500 --> 00:13:19.100 align:middle line:79% position:50% size:70% +So, yes, we did actually ask +for inputs through GitHub. 
+
+00:13:19.166 --> 00:13:24.433 align:middle line:79% position:50% size:75%
+And I believe we got comments
+from Austria, I believe, maybe
+
+00:13:24.500 --> 00:13:26.366 align:middle line:85% position:50% size:68%
+directly, maybe indirectly.
+
+00:13:26.433 --> 00:13:31.633 align:middle line:79% position:50% size:65%
+And so, yeah, we developed
+this, you know, openly.
+
+00:13:31.700 --> 00:13:35.799 align:middle line:79% position:50% size:75%
+Of course, we always love more
+input so that we can consider
+
+00:13:35.866 --> 00:13:38.033 align:middle line:79% position:50% size:48%
+any requirements
+in future versions.
+
+00:13:40.433 --> 00:13:43.000 align:middle line:85% position:50% size:55%
+Then another question.
+
+00:13:43.066 --> 00:13:46.366 align:middle line:79% position:50% size:60%
+Would it be easy to add
+non-WCAG criteria to the
+
+00:13:46.433 --> 00:13:47.600 align:middle line:85% position:50% size:43%
+report generator?
+
+00:13:47.666 --> 00:13:51.133 align:middle line:79% position:50% size:75%
+For example, other clauses
+from the EN, from the European
+
+00:13:51.200 --> 00:13:56.666 align:middle line:85% position:50% size:65%
+standard of the directive.
+
+00:13:56.733 --> 00:13:58.899 align:middle line:79% position:50% size:75%
+>> ERIC VELLEMAN: Well, I
+mean, if you have a programmer
+
+00:13:58.966 --> 00:14:01.133 align:middle line:79% position:50% size:50%
+then I think it
+would be easy maybe.
+
+00:14:01.200 --> 00:14:03.866 align:middle line:85% position:50% size:43%
+But it's -- yeah.
+
+00:14:03.933 --> 00:14:04.899 align:middle line:79% position:50% size:50%
+>> SHADI ABOU-ZAHRA:
+It's open source.
+
+00:14:04.966 --> 00:14:08.333 align:middle line:79% position:50% size:63%
+So, yeah, you
+would be -- yeah. It's --
+
+00:14:08.399 --> 00:14:09.933 align:middle line:85% position:50% size:80%
+>> ERIC VELLEMAN: All on GitHub.
+
+00:14:10.000 --> 00:14:11.566 align:middle line:79% position:50% size:73%
+>> SHADI ABOU-ZAHRA: Not out
+of the box, but you should be
+
+00:14:11.633 --> 00:14:17.133 align:middle line:85% position:50% size:65%
+able to adapt the -- okay.
+
+00:14:17.200 --> 00:14:19.299 align:middle line:79% position:50% size:68%
+And then last question
+before we move to your next
+
+00:14:19.366 --> 00:14:20.700 align:middle line:85% position:50% size:48%
+presentation, Eric.
+
+00:14:20.766 --> 00:14:23.700 align:middle line:79% position:50% size:75%
+We often have multiple
+analysts working on evaluating
+
+00:14:23.766 --> 00:14:25.066 align:middle line:85% position:50% size:60%
+a site at the same time.
+
+00:14:25.133 --> 00:14:28.133 align:middle line:79% position:50% size:73%
+Is there any way to break out
+of a page for evaluation and
+
+00:14:28.200 --> 00:14:31.700 align:middle line:79% position:50% size:70%
+upload these results back to
+the main evaluation so they
+
+00:14:31.766 --> 00:14:35.600 align:middle line:79% position:50% size:70%
+could work in tandem so that
+somebody -- I understand the
+
+00:14:35.666 --> 00:14:38.299 align:middle line:79% position:50% size:68%
+question that somebody
+evaluates part and then the
+
+00:14:38.366 --> 00:14:39.933 align:middle line:79% position:50% size:53%
+other evaluates other
+parts and then we can
+
+00:14:40.000 --> 00:14:42.733 align:middle line:85% position:50% size:50%
+combine the reports.
+
+00:14:42.799 --> 00:14:47.066 align:middle line:79% position:50% size:70%
+>> ERIC VELLEMAN: No, not if
+you do that simultaneously.
+ +00:14:47.133 --> 00:14:51.600 align:middle line:79% position:50% size:70% +So if somebody stops +evaluating and another takes + +00:14:51.666 --> 00:14:55.600 align:middle line:79% position:50% size:70% +up, then you can send each +other the JSON file, and the + +00:14:55.666 --> 00:14:57.333 align:middle line:85% position:50% size:58% +other one can continue. + +00:14:57.399 --> 00:15:01.966 align:middle line:79% position:50% size:63% +But if you -- as far as I +know, if you both do the + +00:15:02.033 --> 00:15:05.100 align:middle line:79% position:50% size:40% +evaluation, you +import the file. + +00:15:05.166 --> 00:15:08.766 align:middle line:79% position:50% size:45% +I'm not quite sure +what happens then. + +00:15:08.833 --> 00:15:11.700 align:middle line:79% position:50% size:43% +So could be +that it is added. + +00:15:11.766 --> 00:15:13.433 align:middle line:85% position:50% size:10% +Hmm. + +00:15:13.500 --> 00:15:17.100 align:middle line:79% position:50% size:58% +I'll put it on the list +of things to check. + +00:15:17.166 --> 00:15:19.666 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Right. Yeah. + +00:15:19.733 --> 00:15:22.500 align:middle line:79% position:50% size:73% +And then information in +Germany, we also use the tool + +00:15:22.566 --> 00:15:25.833 align:middle line:79% position:50% size:53% +and make changes, but +we're still at work. + +00:15:25.899 --> 00:15:30.333 align:middle line:79% position:50% size:73% +So, obviously, using that and +making changes to it, yeah, + +00:15:30.399 --> 00:15:33.166 align:middle line:79% position:50% size:73% +welcome to -- we also welcome +translations -- + +00:15:33.233 --> 00:15:35.033 align:middle line:85% position:50% size:55% +>> ERIC VELLEMAN: Yep. + +00:15:35.100 --> 00:15:37.000 align:middle line:79% position:50% size:68% +>> SHADI ABOU-ZAHRA: -- for +all resources that + +00:15:37.066 --> 00:15:38.466 align:middle line:85% position:50% size:50% +you've talked about. + +00:15:38.533 --> 00:15:40.633 align:middle line:79% position:50% size:70% +There were some questions on +translations that I'll come + +00:15:40.700 --> 00:15:43.933 align:middle line:79% position:50% size:43% +back to later +again in the Q&A. + +00:15:44.000 --> 00:15:45.533 align:middle line:85% position:50% size:13% +Yeah. + +00:15:45.600 --> 00:15:53.333 align:middle line:79% position:50% size:63% +So -- oh, and somebody -- +at the moment we add + +00:15:53.399 --> 00:15:55.733 align:middle line:85% position:50% size:48% +an Excel converter. + +00:15:55.799 --> 00:15:58.966 align:middle line:79% position:50% size:70% +So that's nice to hear that +there's something like that. + +00:15:59.033 --> 00:16:01.333 align:middle line:79% position:50% size:73% +Again, this is an open-source +project, so you're always + +00:16:01.399 --> 00:16:06.100 align:middle line:79% position:50% size:65% +welcome to contribute +things back into GitHub if + +00:16:06.166 --> 00:16:07.133 align:middle line:85% position:50% size:35% +you wanted to. + +00:16:07.200 --> 00:16:11.533 align:middle line:79% position:50% size:68% +There are several issues in +GitHub of people asking to + +00:16:11.600 --> 00:16:15.966 align:middle line:79% position:50% size:75% +have, for example, this +collaborative so that multiple + +00:16:16.033 --> 00:16:17.733 align:middle line:79% position:50% size:43% +people can edit +at the same time. 
+ +00:16:17.799 --> 00:16:20.733 align:middle line:79% position:50% size:75% +We know that this is a feature +request, but that's actually + +00:16:20.799 --> 00:16:23.366 align:middle line:85% position:50% size:68% +quite a project on its own. + +00:16:23.433 --> 00:16:28.266 align:middle line:79% position:50% size:75% +For now we started really with +just this import to allow when + +00:16:28.333 --> 00:16:35.600 align:middle line:79% position:50% size:70% +you select a sample of pages +from your site and you have + +00:16:35.666 --> 00:16:39.799 align:middle line:79% position:50% size:70% +automated results, you can +fill in what the tool knows, + +00:16:39.866 --> 00:16:42.133 align:middle line:79% position:50% size:63% +but you still need to +continue doing the manual + +00:16:42.200 --> 00:16:43.966 align:middle line:85% position:50% size:45% +part of your work. + +00:16:44.033 --> 00:16:49.166 align:middle line:79% position:50% size:73% +But it helps combine +automated and manual testing. + +00:16:49.233 --> 00:16:50.233 align:middle line:85% position:50% size:13% +Okay. + +00:16:50.299 --> 00:16:52.399 align:middle line:79% position:50% size:70% +We're a bit behind schedule, +Eric, so please go + +00:16:52.466 --> 00:16:57.100 align:middle line:85% position:50% size:48% +ahead and continue. + +00:16:57.166 --> 00:16:58.633 align:middle line:85% position:50% size:58% +>> ERIC VELLEMAN: Okay. + +00:16:58.700 --> 00:17:02.133 align:middle line:79% position:50% size:45% +Let me get my +presentation back. + +00:17:02.200 --> 00:17:05.933 align:middle line:85% position:50% size:33% +Get you back. + +00:17:06.000 --> 00:17:07.700 align:middle line:85% position:50% size:33% +Share screen. + +00:17:11.666 --> 00:17:15.866 align:middle line:85% position:50% size:40% +So there we are. + +00:17:15.933 --> 00:17:17.599 align:middle line:85% position:50% size:73% +The accessibility statements. + +00:17:17.666 --> 00:17:22.033 align:middle line:79% position:50% size:75% +Well, this is a shorter story +because we did a lot more work + +00:17:22.099 --> 00:17:27.700 align:middle line:79% position:50% size:75% +on the evaluation tool, and +not just adding translation -- + +00:17:27.766 --> 00:17:30.099 align:middle line:79% position:50% size:68% +the possibility of easy +translation there, but also + +00:17:30.166 --> 00:17:35.433 align:middle line:79% position:50% size:73% +adding a lot of functionality +like the imports and like the + +00:17:35.500 --> 00:17:38.500 align:middle line:79% position:50% size:50% +saving and opening +files and to be able + +00:17:38.566 --> 00:17:41.200 align:middle line:85% position:50% size:53% +to share evaluations. + +00:17:41.266 --> 00:17:44.433 align:middle line:79% position:50% size:68% +So for the accessibility +statements generator, it is + +00:17:44.500 --> 00:17:48.666 align:middle line:79% position:50% size:75% +still the version that you can +see live on the W3 website. + +00:17:48.733 --> 00:17:51.900 align:middle line:79% position:50% size:70% +If you search for statements +generator on W3, you'll + +00:17:51.966 --> 00:17:53.500 align:middle line:85% position:50% size:50% +find it immediately. + +00:17:53.566 --> 00:18:00.366 align:middle line:79% position:50% size:68% +It's at W3.org/WAI -- W-A-I +-- /planning/statements + +00:18:00.433 --> 00:18:04.233 align:middle line:85% position:50% size:25% +and slash. 
+ +00:18:04.299 --> 00:18:07.466 align:middle line:79% position:50% size:68% +And there's a page there to +generate an accessibility + +00:18:07.533 --> 00:18:12.200 align:middle line:79% position:50% size:65% +statement with a lot of +information on the Preview + +00:18:12.266 --> 00:18:15.233 align:middle line:79% position:50% size:73% +button, but you still have to +fill in everything by hand. + +00:18:15.299 --> 00:18:19.866 align:middle line:79% position:50% size:73% +So what the generator does is +it guides you on providing + +00:18:19.933 --> 00:18:21.900 align:middle line:85% position:50% size:63% +accessibility statements. + +00:18:21.966 --> 00:18:25.066 align:middle line:79% position:50% size:73% +It is aligned with the EU Web +Accessibility Directive and + +00:18:25.133 --> 00:18:27.333 align:middle line:79% position:50% size:48% +with WCAG 2 +Conformance Claims. + +00:18:27.400 --> 00:18:30.333 align:middle line:79% position:50% size:63% +It is a manual generator, +so this one does not do + +00:18:30.400 --> 00:18:32.366 align:middle line:85% position:50% size:53% +any automatic checks. + +00:18:32.433 --> 00:18:37.633 align:middle line:79% position:50% size:65% +But you could link to the +evaluation reports or to a + +00:18:37.700 --> 00:18:42.766 align:middle line:79% position:50% size:65% +JSON file, but you output +it from the previous tool. + +00:18:42.833 --> 00:18:44.633 align:middle line:79% position:50% size:58% +The output is available +in both human and + +00:18:44.700 --> 00:18:46.200 align:middle line:85% position:50% size:63% +machine-readable formats. + +00:18:46.266 --> 00:18:49.466 align:middle line:79% position:50% size:73% +The tool includes examples of +statements, guidance, how to + +00:18:49.533 --> 00:18:51.200 align:middle line:85% position:50% size:75% +make accessibility statements. + +00:18:51.266 --> 00:18:53.033 align:middle line:79% position:50% size:55% +Most people are, like, +what is important for + +00:18:53.099 --> 00:18:54.966 align:middle line:85% position:50% size:63% +people with disabilities? + +00:18:55.033 --> 00:18:57.233 align:middle line:79% position:50% size:68% +What would they like to see +in a statement like that? + +00:18:57.299 --> 00:18:59.933 align:middle line:79% position:50% size:40% +And a statements +generator tool. + +00:19:00.000 --> 00:19:02.900 align:middle line:79% position:50% size:70% +It includes input from +communities and experts, and + +00:19:02.966 --> 00:19:07.599 align:middle line:79% position:50% size:70% +it is almost ready for +translation to your language + +00:19:07.666 --> 00:19:12.333 align:middle line:79% position:50% size:68% +because we're still working +the last bits of that. + +00:19:12.400 --> 00:19:17.166 align:middle line:79% position:50% size:70% +So, also, in, like, a week +or two, this should be done. + +00:19:17.233 --> 00:19:20.500 align:middle line:79% position:50% size:75% +And then you could just take +one file, translate everything + +00:19:20.566 --> 00:19:25.433 align:middle line:79% position:50% size:75% +in that file, and you would +immediately have your language + +00:19:25.500 --> 00:19:28.433 align:middle line:79% position:50% size:70% +version of the accessibility +statements generator. + +00:19:28.500 --> 00:19:31.866 align:middle line:79% position:50% size:73% +The only thing that has to be +done is that somebody at W3C + +00:19:31.933 --> 00:19:37.833 align:middle line:79% position:50% size:75% +will then have to put the +translated file into a folder. 
+ +00:19:37.900 --> 00:19:40.266 align:middle line:79% position:50% size:68% +So why would you provide an +accessibility statement? + +00:19:40.333 --> 00:19:45.433 align:middle line:79% position:50% size:70% +Well, that is to show users +that you care about them and + +00:19:45.500 --> 00:19:48.166 align:middle line:79% position:50% size:65% +about accessibility, to +provide them with valuable + +00:19:48.233 --> 00:19:53.333 align:middle line:79% position:50% size:73% +help, for CSR reasons, or it +may be a requirement, like in + +00:19:53.400 --> 00:19:57.133 align:middle line:79% position:50% size:63% +Europe for the Directive, +for public sector bodies. + +00:19:57.200 --> 00:20:00.033 align:middle line:79% position:50% size:43% +And what does +the tool then do? + +00:20:00.099 --> 00:20:04.133 align:middle line:79% position:50% size:73% +Well, the tool asks you the +questions you need to answer. + +00:20:04.200 --> 00:20:07.766 align:middle line:79% position:50% size:65% +So it asks you basic +information, your efforts, + +00:20:07.833 --> 00:20:11.933 align:middle line:79% position:50% size:68% +technical information about +the accessibility, and + +00:20:12.000 --> 00:20:14.866 align:middle line:79% position:50% size:58% +regarding the approval +and complaints process. + +00:20:14.933 --> 00:20:16.933 align:middle line:79% position:50% size:70% +It helps create an +accessibility statement that + +00:20:17.000 --> 00:20:20.000 align:middle line:79% position:50% size:63% +can be further customized +and branded, et cetera. + +00:20:20.066 --> 00:20:25.433 align:middle line:79% position:50% size:75% +So you can just save the end +result and make your very much + +00:20:25.500 --> 00:20:28.700 align:middle line:79% position:50% size:55% +most -- more beautiful +version out of it. + +00:20:28.766 --> 00:20:31.299 align:middle line:79% position:50% size:68% +It helps make accessibility +statements conformant with + +00:20:31.366 --> 00:20:33.066 align:middle line:85% position:50% size:43% +the EU Directive. + +00:20:33.133 --> 00:20:35.799 align:middle line:79% position:50% size:63% +And the output is not for +lawyers, but for users + +00:20:35.866 --> 00:20:38.033 align:middle line:85% position:50% size:40% +of your content. + +00:20:38.099 --> 00:20:41.500 align:middle line:79% position:50% size:70% +So make sure they understand +so they know what to do. + +00:20:41.566 --> 00:20:43.566 align:middle line:79% position:50% size:60% +That was the idea behind +the accessibility + +00:20:43.633 --> 00:20:45.533 align:middle line:85% position:50% size:50% +statement generator. + +00:20:45.599 --> 00:20:47.566 align:middle line:79% position:50% size:50% +And the future work +is the translations. + +00:20:47.633 --> 00:20:49.933 align:middle line:85% position:50% size:60% +We're working on it now. + +00:20:50.000 --> 00:20:53.033 align:middle line:79% position:50% size:58% +And we also looked into +the possibility to add + +00:20:53.099 --> 00:20:55.700 align:middle line:85% position:50% size:48% +your own questions. + +00:20:55.766 --> 00:20:57.333 align:middle line:85% position:50% size:68% +That was one of the issues. + +00:20:57.400 --> 00:20:59.633 align:middle line:79% position:50% size:65% +So if you go to the GitHub +page, you'll find a lot of + +00:20:59.700 --> 00:21:02.466 align:middle line:79% position:50% size:45% +proposals there +for changes to the + +00:21:02.533 --> 00:21:04.166 align:middle line:79% position:50% size:60% +accessibility statements +generator. 
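As a rough illustration of the four groups of questions just listed (basic information, efforts, technical details, and the feedback and complaints process), a statement could be assembled from input shaped something like this hypothetical sketch; the field names and the example values are invented and do not reflect the generator's real data format.

```typescript
// Hypothetical, simplified shape of the answers a statement is generated from.
interface AccessibilityStatementInput {
  basic: {
    organizationName: string;
    websiteUrl: string;
    standardApplied: "WCAG 2.0" | "WCAG 2.1";
    conformanceStatus: "fully conformant" | "partially conformant" | "non-conformant";
  };
  efforts: string[];                  // organizational and other measures taken
  technical: {
    limitations: string[];            // known accessibility limitations
    compatibleEnvironments: string[]; // browsers, assistive technologies, ...
  };
  feedback: {
    contactEmail: string;             // how users can report problems
    complaintsProcedure: string;
  };
}

// Example input; every value here is made up.
const example: AccessibilityStatementInput = {
  basic: {
    organizationName: "Example Org",
    websiteUrl: "https://example.org/",
    standardApplied: "WCAG 2.1",
    conformanceStatus: "partially conformant",
  },
  efforts: ["Accessibility is part of our design reviews"],
  technical: {
    limitations: ["Older PDF documents are not tagged"],
    compatibleEnvironments: ["Recent browsers with NVDA or VoiceOver"],
  },
  feedback: {
    contactEmail: "accessibility@example.org",
    complaintsProcedure: "Unresolved complaints go to the national enforcement body.",
  },
};
console.log(example.basic.organizationName);
```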
+ +00:21:04.233 --> 00:21:06.033 align:middle line:85% position:50% size:53% +We had to prioritize. + +00:21:06.099 --> 00:21:10.333 align:middle line:79% position:50% size:75% +So besides a few other things, +the most important thing was + +00:21:10.400 --> 00:21:12.000 align:middle line:85% position:50% size:43% +the translations. + +00:21:12.066 --> 00:21:14.066 align:middle line:85% position:50% size:65% +But all the code is there. + +00:21:14.133 --> 00:21:17.533 align:middle line:79% position:50% size:70% +So, also, in this case you +could take the complete code + +00:21:17.599 --> 00:21:19.066 align:middle line:85% position:50% size:68% +and add your own questions. + +00:21:19.133 --> 00:21:21.000 align:middle line:79% position:50% size:70% +And I think in the case of +the accessibility statements + +00:21:21.066 --> 00:21:25.133 align:middle line:79% position:50% size:78% +generator, that would really +be a fairly simple thing to do. + +00:21:25.200 --> 00:21:28.099 align:middle line:79% position:50% size:65% +I think even if you're not +really extremely technical + +00:21:28.166 --> 00:21:31.400 align:middle line:85% position:50% size:68% +you could probably do that. + +00:21:31.466 --> 00:21:32.700 align:middle line:85% position:50% size:13% +Okay. + +00:21:32.766 --> 00:21:34.900 align:middle line:85% position:50% size:55% +Now for the live demo. + +00:21:34.966 --> 00:21:37.233 align:middle line:85% position:50% size:38% +Let me go here. + +00:21:37.299 --> 00:21:41.733 align:middle line:79% position:50% size:58% +So this is the live +page on the W3 website. + +00:21:41.799 --> 00:21:48.366 align:middle line:79% position:50% size:78% +And it's at +W3.org/WAI/planning/statements. + +00:21:48.433 --> 00:21:53.200 align:middle line:79% position:50% size:70% +And then if you go to +/generator and then slash -- + +00:21:53.266 --> 00:21:57.333 align:middle line:79% position:50% size:65% +well, then you will get to +this create page, where it + +00:21:57.400 --> 00:22:00.366 align:middle line:79% position:50% size:65% +asks you basic information +like the name of your + +00:22:00.433 --> 00:22:04.599 align:middle line:79% position:50% size:63% +organization, which could +be like this, and then a + +00:22:04.666 --> 00:22:08.133 align:middle line:79% position:50% size:60% +telephone number or +address of your website. + +00:22:08.200 --> 00:22:10.066 align:middle line:85% position:50% size:63% +That's not a correct one. + +00:22:10.133 --> 00:22:13.166 align:middle line:79% position:50% size:60% +Over here the standards +applied, the conformance + +00:22:13.233 --> 00:22:16.799 align:middle line:79% position:50% size:55% +status, the additional +considerations. + +00:22:18.700 --> 00:22:21.933 align:middle line:85% position:50% size:55% +Don't send them to me. + +00:22:22.000 --> 00:22:22.966 align:middle line:85% position:50% size:25% +Et cetera. + +00:22:23.033 --> 00:22:27.766 align:middle line:79% position:50% size:65% +So there's the efforts you +did, like organizational + +00:22:27.833 --> 00:22:30.900 align:middle line:79% position:50% size:60% +measures you took or any +other measures, you can + +00:22:30.966 --> 00:22:33.133 align:middle line:85% position:50% size:35% +add them here. + +00:22:33.200 --> 00:22:39.599 align:middle line:79% position:50% size:75% +Technical information like the +accessibility limitations, + +00:22:39.666 --> 00:22:42.133 align:middle line:79% position:50% size:58% +compatibility with user +environment, et cetera. 
+ +00:22:42.200 --> 00:22:45.933 align:middle line:79% position:50% size:60% +So it's all -- this is +all described in detail. + +00:22:46.000 --> 00:22:52.233 align:middle line:79% position:50% size:70% +If you show info here, below +every header or form fields, + +00:22:52.299 --> 00:22:54.900 align:middle line:79% position:50% size:65% +there is -- or above every +form field, there is more + +00:22:54.966 --> 00:22:59.666 align:middle line:79% position:50% size:60% +information about what +you could fill in there. + +00:22:59.733 --> 00:23:02.099 align:middle line:79% position:50% size:40% +The technologies +used, et cetera. + +00:23:02.166 --> 00:23:05.000 align:middle line:79% position:50% size:68% +Then somewhere at the +bottom you can preview your + +00:23:05.066 --> 00:23:07.400 align:middle line:79% position:50% size:60% +accessibility statement, +and it takes you to your + +00:23:07.466 --> 00:23:10.400 align:middle line:85% position:50% size:60% +accessibility statement. + +00:23:10.466 --> 00:23:12.033 align:middle line:79% position:50% size:48% +So I haven't filled +in anything, so now + +00:23:12.099 --> 00:23:14.299 align:middle line:85% position:50% size:45% +it's website name. + +00:23:14.366 --> 00:23:20.533 align:middle line:79% position:50% size:53% +But if I would go +back, let's see here. + +00:23:20.599 --> 00:23:24.166 align:middle line:79% position:50% size:48% +I will put at least +a name in here. + +00:23:24.233 --> 00:23:26.733 align:middle line:79% position:50% size:43% +Oh, I did put +the name in here. + +00:23:32.033 --> 00:23:34.000 align:middle line:85% position:50% size:60% +Terrible things of life. + +00:23:36.799 --> 00:23:38.599 align:middle line:85% position:50% size:13% +Okay. + +00:23:38.666 --> 00:23:40.633 align:middle line:85% position:50% size:43% +Well, here it is. + +00:23:40.700 --> 00:23:43.066 align:middle line:79% position:50% size:43% +So I probably +forgot something. + +00:23:43.133 --> 00:23:44.900 align:middle line:79% position:50% size:58% +It does the description +of the name. + +00:23:44.966 --> 00:23:48.766 align:middle line:85% position:50% size:65% +So it's a few forms later. + +00:23:48.833 --> 00:23:52.500 align:middle line:79% position:50% size:70% +So it will generate -- +everything that you fill out + +00:23:52.566 --> 00:23:55.366 align:middle line:85% position:50% size:73% +will be appearing below here. + +00:23:55.433 --> 00:23:57.966 align:middle line:79% position:50% size:75% +So that is why it is currently +very short, but the more + +00:23:58.033 --> 00:24:00.799 align:middle line:79% position:50% size:68% +information you input, the +more information you get in + +00:24:00.866 --> 00:24:04.466 align:middle line:85% position:50% size:73% +this accessibility statement. + +00:24:04.533 --> 00:24:07.599 align:middle line:85% position:50% size:25% +That's it. + +00:24:07.666 --> 00:24:10.266 align:middle line:79% position:50% size:68% +And then above here -- of +course, before I say that's + +00:24:10.333 --> 00:24:12.766 align:middle line:79% position:50% size:73% +it, I should have said +somewhere here above you will + +00:24:12.833 --> 00:24:17.366 align:middle line:79% position:50% size:70% +soon see this translation +button where you can get the + +00:24:17.433 --> 00:24:21.666 align:middle line:79% position:50% size:68% +file, translate it to your +language, and then see your + +00:24:21.733 --> 00:24:25.466 align:middle line:79% position:50% size:70% +own language version of this +accessibility statement. 
+
+00:24:25.533 --> 00:24:29.566 align:middle line:85% position:50% size:13%
+Yeah.
+
+00:24:29.633 --> 00:24:31.566 align:middle line:79% position:50% size:50%
+>> SHADI ABOU-ZAHRA:
+Thank you, Eric.
+
+00:24:31.633 --> 00:24:32.466 align:middle line:85% position:50% size:13%
+Yeah.
+
+00:24:32.533 --> 00:24:33.799 align:middle line:85% position:50% size:43%
+So right on time.
+
+00:24:33.866 --> 00:24:36.033 align:middle line:79% position:50% size:73%
+We have a couple of questions
+that I think are a little bit
+
+00:24:36.099 --> 00:24:39.233 align:middle line:79% position:50% size:68%
+beyond the scope of this
+project, but they're really
+
+00:24:39.299 --> 00:24:40.400 align:middle line:85% position:50% size:38%
+good questions.
+
+00:24:40.466 --> 00:24:43.866 align:middle line:79% position:50% size:68%
+So one question is: I've
+seen public sector websites
+
+00:24:43.933 --> 00:24:47.366 align:middle line:79% position:50% size:65%
+publishing accessibility
+statements only to conform
+
+00:24:47.433 --> 00:24:49.200 align:middle line:85% position:50% size:43%
+with regulations.
+
+00:24:49.266 --> 00:24:52.533 align:middle line:79% position:50% size:60%
+In those statements they
+say that some parts
+
+00:24:52.599 --> 00:24:54.733 align:middle line:85% position:50% size:48%
+are not accessible.
+
+00:24:54.799 --> 00:24:57.266 align:middle line:79% position:50% size:58%
+What's the use of
+having such statements?
+
+00:24:57.333 --> 00:24:58.933 align:middle line:79% position:50% size:53%
+So, yeah, that's a
+really good question.
+
+00:24:59.000 --> 00:25:03.500 align:middle line:79% position:50% size:75%
+And this is one of the reasons
+why, you know, in addition to
+
+00:25:03.566 --> 00:25:08.466 align:middle line:79% position:50% size:70%
+the generator, we have
+guidance, including examples
+
+00:25:08.533 --> 00:25:12.233 align:middle line:79% position:50% size:70%
+of accessibility statements,
+to really try to
+
+00:25:12.299 --> 00:25:14.166 align:middle line:85% position:50% size:55%
+promote good practice.
+
+00:25:14.233 --> 00:25:18.133 align:middle line:79% position:50% size:68%
+But, yeah, this issue, I
+guess, of what people do or
+
+00:25:18.200 --> 00:25:21.833 align:middle line:79% position:50% size:53%
+don't is a little
+bit beyond our scope.
+
+00:25:21.900 --> 00:25:25.433 align:middle line:79% position:50% size:65%
+Another question that also
+Jorge might be interested
+
+00:25:25.500 --> 00:25:26.666 align:middle line:85% position:50% size:30%
+to weigh in.
+
+00:25:26.733 --> 00:25:30.200 align:middle line:79% position:50% size:68%
+It was saying: I was
+interested to learn that in
+
+00:25:30.266 --> 00:25:33.700 align:middle line:79% position:50% size:68%
+Portugal there's a proposed
+standard URL path for
+
+00:25:33.766 --> 00:25:36.333 align:middle line:85% position:50% size:63%
+accessibility statements.
+
+00:25:36.400 --> 00:25:40.133 align:middle line:79% position:50% size:73%
+So on a public website if you
+write "acessibilidade" in the
+
+00:25:40.200 --> 00:25:44.766 align:middle line:79% position:50% size:60%
+URL, you should get the
+accessibility statement.
+
+00:25:44.833 --> 00:25:48.400 align:middle line:79% position:50% size:73%
+Are there more countries with
+such standard locations?
+
+00:25:48.466 --> 00:25:52.966 align:middle line:79% position:50% size:73%
+Eric, first, are you aware of
+other countries that have a
+
+00:25:53.033 --> 00:25:54.433 align:middle line:85% position:50% size:43%
+similar practice?
+ +00:25:54.500 --> 00:25:57.066 align:middle line:79% position:50% size:75% +>> ERIC VELLEMAN: Yeah. I know +that the Netherlands has a + +00:25:57.133 --> 00:25:59.166 align:middle line:79% position:50% size:58% +standard location where +you can find all the + +00:25:59.233 --> 00:26:00.666 align:middle line:85% position:50% size:63% +accessibility statements. + +00:26:00.733 --> 00:26:04.599 align:middle line:79% position:50% size:78% +That is at the +toegankelijkheidsverklaring.nl. + +00:26:04.666 --> 00:26:08.900 align:middle line:79% position:50% size:55% +And you can find +enough of you of 2,500 + +00:26:08.966 --> 00:26:12.966 align:middle line:85% position:50% size:70% +toegankelijkheidsverklaring. + +00:26:13.033 --> 00:26:15.433 align:middle line:79% position:50% size:73% +And that is also the place -- +sometimes they're not even on + +00:26:15.500 --> 00:26:18.166 align:middle line:79% position:50% size:73% +the website or you can't find +them on the website of the + +00:26:18.233 --> 00:26:22.033 align:middle line:79% position:50% size:73% +public sector body, but you +can find them on this website + +00:26:22.099 --> 00:26:27.200 align:middle line:79% position:50% size:53% +that is hosted by the +central government. + +00:26:27.266 --> 00:26:34.633 align:middle line:79% position:50% size:68% +>> SHADI ABOU-ZAHRA: Jorge, +any other thoughts on this? + +00:26:34.700 --> 00:26:36.866 align:middle line:79% position:50% size:68% +>> JORGE FERNANDES: Well, I +don't know other countries + +00:26:36.933 --> 00:26:43.000 align:middle line:79% position:50% size:48% +where they use also +the standard URL. + +00:26:43.066 --> 00:26:47.866 align:middle line:79% position:50% size:55% +But, yes, in Portugal +we are using this one. + +00:26:47.933 --> 00:26:52.700 align:middle line:79% position:50% size:68% +For example, I know that in +Italy they centralized the + +00:26:52.766 --> 00:26:58.766 align:middle line:79% position:50% size:70% +accessibility statement +in a website, and you make a + +00:26:58.833 --> 00:27:02.400 align:middle line:85% position:50% size:53% +link to that website. + +00:27:02.466 --> 00:27:07.799 align:middle line:79% position:50% size:75% +The accessibility statement is +not on the website of the -- + +00:27:07.866 --> 00:27:14.033 align:middle line:79% position:50% size:73% +of the public sector bodies, +so it is outside, centralized + +00:27:14.099 --> 00:27:23.400 align:middle line:79% position:50% size:58% +in a server managed by +the national authority. + +00:27:23.466 --> 00:27:29.500 align:middle line:79% position:50% size:63% +But I don't know other -- +or other examples. + +00:27:29.566 --> 00:27:30.700 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Thank you. Yeah. + +00:27:30.766 --> 00:27:33.000 align:middle line:79% position:50% size:55% +And that goes directly +to the next question. + +00:27:33.066 --> 00:27:36.866 align:middle line:79% position:50% size:70% +Do all countries adopt this +statement generator, knowing + +00:27:36.933 --> 00:27:40.733 align:middle line:79% position:50% size:73% +that some countries in the EU +have additional requirements, + +00:27:40.799 --> 00:27:43.299 align:middle line:79% position:50% size:48% +for example, France +and Netherlands? + +00:27:43.366 --> 00:27:46.366 align:middle line:79% position:50% size:48% +The latter also has +its own generator. + +00:27:46.433 --> 00:27:49.966 align:middle line:79% position:50% size:68% +And then there is the Dutch +name, which I cannot read. 
+ +00:27:50.033 --> 00:27:53.033 align:middle line:79% position:50% size:78% +>> ERIC VELLEMAN: +Toegankelijkheidsverklaring.nl. + +00:27:53.099 --> 00:27:56.966 align:middle line:79% position:50% size:73% +>> SHADI ABOU-ZAHRA: Yeah, be +nice to the captioner, Eric. + +00:27:57.033 --> 00:27:58.866 align:middle line:79% position:50% size:48% +>> ERIC VELLEMAN: +Oh, that's trouble. + +00:27:58.933 --> 00:27:59.799 align:middle line:79% position:50% size:50% +>> SHADI ABOU-ZAHRA: +Having a match. + +00:27:59.866 --> 00:28:02.166 align:middle line:85% position:50% size:60% +>> ERIC VELLEMAN: Woops. + +00:28:02.233 --> 00:28:03.400 align:middle line:85% position:50% size:65% +>> SHADI ABOU-ZAHRA: Okay. + +00:28:03.466 --> 00:28:06.366 align:middle line:85% position:50% size:53% +So we do not -- yeah. + +00:28:06.433 --> 00:28:11.700 align:middle line:79% position:50% size:73% +We provide this accessibility +statement generator as an + +00:28:11.766 --> 00:28:15.866 align:middle line:79% position:50% size:75% +example as the minimum +requirements in the Directive. + +00:28:15.933 --> 00:28:18.366 align:middle line:79% position:50% size:75% +Also something internationally +-- this is not only for + +00:28:18.433 --> 00:28:20.400 align:middle line:79% position:50% size:73% +Europe, but providing +accessibility statements as a + +00:28:20.466 --> 00:28:23.200 align:middle line:85% position:50% size:75% +best practice internationally. + +00:28:23.266 --> 00:28:28.233 align:middle line:79% position:50% size:75% +And the expectation is whoever +wants can take it, adapt it. + +00:28:28.299 --> 00:28:31.099 align:middle line:79% position:50% size:73% +So this is why Eric mentioned +that you can actually add + +00:28:31.166 --> 00:28:36.299 align:middle line:79% position:50% size:73% +additional questions, so we +try to write the code in such + +00:28:36.366 --> 00:28:42.900 align:middle line:79% position:50% size:75% +a way that you can extend this +basis and also translations so + +00:28:42.966 --> 00:28:45.733 align:middle line:79% position:50% size:40% +that you can use +this separately. + +00:28:45.799 --> 00:28:48.900 align:middle line:85% position:50% size:75% +So, yeah, it's a tool to help. + +00:28:48.966 --> 00:28:51.333 align:middle line:79% position:50% size:50% +Some have reused it, +like in Portugal. + +00:28:51.400 --> 00:28:53.366 align:middle line:85% position:50% size:40% +Others have not. + +00:28:53.433 --> 00:28:55.599 align:middle line:79% position:50% size:63% +And it's totally up to -- +you know, to people + +00:28:55.666 --> 00:28:57.500 align:middle line:85% position:50% size:53% +what they do with it. + +00:28:57.566 --> 00:28:59.466 align:middle line:79% position:50% size:70% +We did notice -- we did +learn from Portugal that the + +00:28:59.533 --> 00:29:02.433 align:middle line:79% position:50% size:73% +translation was not as easy, +and this is why we're redoing + +00:29:02.500 --> 00:29:05.033 align:middle line:85% position:50% size:73% +that, to support translation. + +00:29:05.099 --> 00:29:06.700 align:middle line:85% position:50% size:13% +Good. + +00:29:06.766 --> 00:29:14.033 align:middle line:79% position:50% size:60% +And then somebody re -- +put the Dutch URL again. + +00:29:14.099 --> 00:29:16.833 align:middle line:79% position:50% size:58% +I think somebody wants +to have fun with Dutch. + +00:29:16.900 --> 00:29:17.633 align:middle line:85% position:50% size:13% +Good. 
+ +00:29:17.700 --> 00:29:19.166 align:middle line:85% position:50% size:65% +Thank you very much, Eric. + +00:29:19.233 --> 00:29:20.433 align:middle line:85% position:50% size:60% +>> ERIC VELLEMAN: Sorry. + +00:29:20.500 --> 00:29:23.233 align:middle line:79% position:50% size:68% +>> SHADI ABOU-ZAHRA: Dutch +lesson in the next webinar. + +00:29:23.299 --> 00:29:26.299 align:middle line:85% position:50% size:45% +Let's now move on. diff --git a/content-images/about/wai-tools/video5.vtt b/content-images/about/wai-tools/video5.vtt new file mode 100644 index 00000000000..7690ee7b72d --- /dev/null +++ b/content-images/about/wai-tools/video5.vtt @@ -0,0 +1,885 @@ +WEBVTT + +00:00:02.266 --> 00:00:03.233 align:middle line:79% position:50% size:43% +>> CARLOS DUARTE: +All right. + +00:00:03.299 --> 00:00:12.400 align:middle line:79% position:50% size:75% +So as Shadi mentioned, earlier +in the project we tried to + +00:00:12.466 --> 00:00:19.033 align:middle line:79% position:50% size:68% +think of ways that we could +use all the data that might + +00:00:19.100 --> 00:00:22.766 align:middle line:79% position:50% size:75% +come out of tools that +implementation rules and other + +00:00:22.833 --> 00:00:29.000 align:middle line:79% position:50% size:73% +open data formats, and it was +one of our goals to propose a + +00:00:29.066 --> 00:00:33.466 align:middle line:79% position:50% size:73% +prototype that could show how +we could browse the large + +00:00:33.533 --> 00:00:38.166 align:middle line:79% position:50% size:75% +amounts of accessibility +testing data that might become + +00:00:38.233 --> 00:00:40.933 align:middle line:85% position:50% size:68% +available in such a future. + +00:00:41.000 --> 00:00:50.533 align:middle line:79% position:50% size:73% +So for this, we tried to look +at this from the perspective + +00:00:50.600 --> 00:00:55.833 align:middle line:79% position:50% size:75% +of someone that's responsible +for monitoring efforts, and so + +00:00:55.899 --> 00:01:00.100 align:middle line:79% position:50% size:75% +we asked ourselves: How can we +leverage the support of open + +00:01:00.166 --> 00:01:04.733 align:middle line:79% position:50% size:45% +data for those +monitoring bodies? + +00:01:04.799 --> 00:01:07.866 align:middle line:79% position:50% size:48% +And we came up with +this prototype, and + +00:01:07.933 --> 00:01:10.799 align:middle line:85% position:50% size:45% +we emphasize this. + +00:01:10.866 --> 00:01:13.166 align:middle line:85% position:50% size:45% +It is a prototype. + +00:01:13.233 --> 00:01:17.200 align:middle line:79% position:50% size:63% +It's not something that's +production ready, but we + +00:01:17.266 --> 00:01:23.200 align:middle line:79% position:50% size:75% +developed this as a way to +support future explorations of + +00:01:23.266 --> 00:01:28.400 align:middle line:85% position:50% size:68% +what can such a tool offer. + +00:01:28.466 --> 00:01:34.466 align:middle line:79% position:50% size:73% +So let me start by addressing +what types of data did we + +00:01:34.533 --> 00:01:37.599 align:middle line:85% position:50% size:70% +consider for this prototype. + +00:01:37.666 --> 00:01:41.666 align:middle line:79% position:50% size:70% +So to begin with, we need to +get data from somewhere, and + +00:01:41.733 --> 00:01:44.333 align:middle line:79% position:50% size:40% +we can see there +are two sources. 
+ +00:01:44.400 --> 00:01:47.933 align:middle line:79% position:50% size:65% +First, the accessibility +evaluation reports that we + +00:01:48.000 --> 00:01:50.400 align:middle line:85% position:50% size:58% +have been listening to. + +00:01:50.466 --> 00:01:56.233 align:middle line:79% position:50% size:75% +So these evaluation reports +are written using EARL so that + +00:01:56.299 --> 00:01:58.200 align:middle line:85% position:50% size:48% +we can import them. + +00:01:58.266 --> 00:02:05.466 align:middle line:79% position:50% size:73% +So something that's an output +of an Axe evaluation and Alfa + +00:02:05.533 --> 00:02:09.866 align:middle line:79% position:50% size:75% +evaluation or a QualWeb +evaluation, all tools that can + +00:02:09.933 --> 00:02:13.233 align:middle line:79% position:50% size:60% +output their evaluation +reports in EARL we could + +00:02:13.300 --> 00:02:17.599 align:middle line:85% position:50% size:70% +import that to this browser. + +00:02:17.666 --> 00:02:20.800 align:middle line:79% position:50% size:63% +And, second, all the +accessibility statements. + +00:02:20.866 --> 00:02:26.466 align:middle line:79% position:50% size:73% +We included statements that +use the open format, both the + +00:02:26.533 --> 00:02:29.800 align:middle line:79% position:50% size:75% +one that Eric just presented +and that was developed as part + +00:02:29.866 --> 00:02:32.833 align:middle line:79% position:50% size:60% +of the project, but also +the one that we adapted + +00:02:32.900 --> 00:02:34.733 align:middle line:85% position:50% size:58% +to be used in Portugal. + +00:02:34.800 --> 00:02:40.400 align:middle line:79% position:50% size:55% +And from both of these +sources, we collect an + +00:02:40.466 --> 00:02:42.166 align:middle line:85% position:50% size:70% +interesting variety of data. + +00:02:42.233 --> 00:02:48.166 align:middle line:79% position:50% size:60% +So we extract from these +sources data on success + +00:02:48.233 --> 00:02:54.000 align:middle line:79% position:50% size:75% +criteria, on the elements that +have been assessed on the ACT + +00:02:54.066 --> 00:02:59.766 align:middle line:79% position:50% size:73% +rules that were used to check +if the tool uses ACT rules, + +00:02:59.833 --> 00:03:02.833 align:middle line:79% position:50% size:68% +the assertions that +were made, and the outcomes + +00:03:02.900 --> 00:03:04.800 align:middle line:85% position:50% size:50% +of those assertions. + +00:03:04.866 --> 00:03:08.466 align:middle line:79% position:50% size:70% +And so that we can browse +this, we have also added the + +00:03:08.533 --> 00:03:13.000 align:middle line:79% position:50% size:75% +five -- a set of metadata that +can be used to categorize and + +00:03:13.066 --> 00:03:15.266 align:middle line:85% position:50% size:73% +support this evaluation data. + +00:03:15.333 --> 00:03:19.766 align:middle line:79% position:50% size:58% +So here we can see the +continent, country, the + +00:03:19.833 --> 00:03:23.966 align:middle line:79% position:50% size:65% +category of the website or +the mobile app owner. + +00:03:24.033 --> 00:03:28.233 align:middle line:79% position:50% size:63% +So if it's a private or a +public entity, the sector + +00:03:28.300 --> 00:03:35.533 align:middle line:79% position:50% size:73% +where the entity operates in, +and even what evaluation tool + +00:03:35.599 --> 00:03:40.666 align:middle line:79% position:50% size:48% +it was used to +produce the report. 
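To make the data model described above a bit more concrete, here is a minimal, hypothetical sketch of one record in such a browser and of the grouping behind the charts; the names are illustrative and do not reflect the prototype's actual implementation.

```typescript
// One imported result combined with the metadata used for grouping.
interface BrowserRecord {
  successCriterion: string;          // e.g. "1.1.1 Non-text Content"
  outcome: "passed" | "failed" | "cantTell" | "inapplicable" | "untested";
  continent: string;
  country: string;
  sector: "public" | "private";
  tool: string;                      // evaluation tool that produced the report
}

// Count outcomes per grouping key (continent, country, sector, tool), which is
// the kind of totals the bar charts and the data table present.
function countOutcomes(records: BrowserRecord[], key: "continent" | "country" | "sector" | "tool") {
  const totals = new Map<string, Record<string, number>>();
  for (const r of records) {
    const group = r[key];
    const bucket = totals.get(group) ?? {};
    bucket[r.outcome] = (bucket[r.outcome] ?? 0) + 1;
    totals.set(group, bucket);
  }
  return totals;
}

// Example: totals per country for two made-up records.
const byCountry = countOutcomes(
  [
    { successCriterion: "1.1.1", outcome: "failed", continent: "Europe", country: "Portugal", sector: "public", tool: "Tool A" },
    { successCriterion: "2.4.7", outcome: "passed", continent: "Europe", country: "Portugal", sector: "private", tool: "Tool B" },
  ],
  "country"
);
console.log(byCountry.get("Portugal")); // { failed: 1, passed: 1 }
```

Drilling down, as in the sector example later in the demo, then amounts to filtering the records to one country before counting.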
+ +00:03:40.733 --> 00:03:47.433 align:middle line:79% position:50% size:68% +And we in the tool designed +a number of visualizations. + +00:03:47.500 --> 00:03:51.533 align:middle line:79% position:50% size:73% +So you will be able to see -- +we'll do a short + +00:03:51.599 --> 00:03:54.133 align:middle line:85% position:50% size:53% +demo after the slide. + +00:03:54.199 --> 00:03:58.699 align:middle line:79% position:50% size:73% +So most data is presented +using bar charts with -- that + +00:03:58.766 --> 00:04:04.300 align:middle line:79% position:50% size:73% +are grouped by the assertions +or the success criteria. + +00:04:04.366 --> 00:04:08.633 align:middle line:79% position:50% size:60% +We -- besides the bar +charts, we have a table. + +00:04:08.699 --> 00:04:12.300 align:middle line:79% position:50% size:70% +We provide access to a table +with the equivalent data of + +00:04:12.366 --> 00:04:17.266 align:middle line:79% position:50% size:75% +the bar charts, but we made an +effort so that all the charts + +00:04:17.333 --> 00:04:23.866 align:middle line:79% position:50% size:75% +are keyboard navigable, and we +also include a timeline view + +00:04:23.933 --> 00:04:28.000 align:middle line:79% position:50% size:65% +so that it's easier to get +this grasping of how the + +00:04:28.066 --> 00:04:31.733 align:middle line:79% position:50% size:55% +different metrics have +evolved over time. + +00:04:31.800 --> 00:04:38.833 align:middle line:79% position:50% size:73% +And I think it's best to +illustrate the different ways + +00:04:38.899 --> 00:04:43.866 align:middle line:79% position:50% size:68% +that we can interact with +the demo, with the data, by + +00:04:43.933 --> 00:04:45.500 align:middle line:85% position:50% size:48% +showing you a demo. + +00:04:45.566 --> 00:04:56.733 align:middle line:79% position:50% size:50% +So let me switch to +new -- a new window. + +00:05:00.366 --> 00:05:01.533 align:middle line:85% position:50% size:13% +Okay. + +00:05:01.600 --> 00:05:10.133 align:middle line:79% position:50% size:73% +And so now this is the -- the +initial page that you access + +00:05:10.199 --> 00:05:14.266 align:middle line:85% position:50% size:63% +when you enter this tool. + +00:05:14.333 --> 00:05:19.033 align:middle line:79% position:50% size:60% +Here we have -- let me +start with a disclaimer. + +00:05:19.100 --> 00:05:22.399 align:middle line:79% position:50% size:65% +All the data that's being +presented in this demo was + +00:05:22.466 --> 00:05:27.300 align:middle line:79% position:50% size:73% +randomly generated, including +the name of websites and the + +00:05:27.366 --> 00:05:29.633 align:middle line:79% position:50% size:43% +evaluation tools, +so everything has + +00:05:29.699 --> 00:05:31.733 align:middle line:85% position:50% size:60% +been randomly generated. + +00:05:31.800 --> 00:05:35.333 align:middle line:79% position:50% size:75% +And the volume of data that we +have here is representative of + +00:05:35.399 --> 00:05:39.399 align:middle line:79% position:50% size:75% +the number of evaluations that +a country the size of Portugal + +00:05:39.466 --> 00:05:44.433 align:middle line:79% position:50% size:63% +can expect to have in +their monitoring process. + +00:05:44.500 --> 00:05:49.766 align:middle line:79% position:50% size:68% +So as I was saying, this is +the initial visualization. 
+ +00:05:49.833 --> 00:05:58.933 align:middle line:79% position:50% size:75% +You get a chart grouped by +pages that have been evaluated + +00:05:59.000 --> 00:06:02.533 align:middle line:85% position:50% size:65% +over different continents. + +00:06:02.600 --> 00:06:04.533 align:middle line:79% position:50% size:40% +We start at the +continent level. + +00:06:04.600 --> 00:06:11.899 align:middle line:79% position:50% size:68% +You can include information +about the assertions, the + +00:06:11.966 --> 00:06:14.600 align:middle line:79% position:50% size:73% +passed assertions, the failed +assertions, the ones that the + +00:06:14.666 --> 00:06:18.133 align:middle line:79% position:50% size:58% +tool can't tell if they +have passed or failed, + +00:06:18.199 --> 00:06:21.600 align:middle line:79% position:50% size:55% +the inapplicable ones, +and the untested ones. + +00:06:21.666 --> 00:06:26.433 align:middle line:79% position:50% size:58% +So similar to what Eric +just showed previously. + +00:06:26.500 --> 00:06:32.633 align:middle line:79% position:50% size:68% +You can, instead of showing +assertions, show the + +00:06:32.699 --> 00:06:34.899 align:middle line:79% position:50% size:50% +information based +on success criteria. + +00:06:34.966 --> 00:06:37.866 align:middle line:79% position:50% size:63% +So passed, fail, +can't tell, inapplicable, + +00:06:37.933 --> 00:06:41.199 align:middle line:85% position:50% size:55% +and untested criteria. + +00:06:41.266 --> 00:06:48.766 align:middle line:79% position:50% size:73% +And let me go back to +assertions and move this Zoom + +00:06:48.833 --> 00:06:55.666 align:middle line:79% position:50% size:68% +window to another place +and show you the data table + +00:06:55.733 --> 00:06:58.133 align:middle line:85% position:50% size:33% +that we have. + +00:06:58.199 --> 00:07:01.766 align:middle line:79% position:50% size:75% +Basically, the same data that +you have in the bar charts can + +00:07:01.833 --> 00:07:06.066 align:middle line:79% position:50% size:48% +be presented in +the tabular format. + +00:07:06.133 --> 00:07:07.399 align:middle line:85% position:50% size:13% +Okay. + +00:07:07.466 --> 00:07:10.000 align:middle line:79% position:50% size:70% +So if you want to see data +grouped by country, you just + +00:07:10.066 --> 00:07:15.066 align:middle line:85% position:50% size:65% +pick that on that sidebar. + +00:07:15.133 --> 00:07:19.833 align:middle line:79% position:50% size:68% +And now that's -- instead +of just seeing, okay, like, + +00:07:19.899 --> 00:07:22.300 align:middle line:79% position:50% size:68% +seeing everything on the +sector page, I just want to + +00:07:22.366 --> 00:07:27.633 align:middle line:79% position:50% size:73% +know information about the +two -- the testability of the + +00:07:27.699 --> 00:07:29.866 align:middle line:79% position:50% size:55% +private and public +sectors in the British + +00:07:29.933 --> 00:07:31.833 align:middle line:85% position:50% size:58% +Indian Ocean territory. + +00:07:31.899 --> 00:07:38.466 align:middle line:79% position:50% size:75% +So I just select that bar, and +I can drill down on sector + +00:07:38.533 --> 00:07:44.699 align:middle line:79% position:50% size:60% +information on the model +window that shows up. 
+ +00:07:44.766 --> 00:07:52.766 align:middle line:79% position:50% size:75% +And now I have the information +about just the British Indian + +00:07:52.833 --> 00:07:57.266 align:middle line:79% position:50% size:65% +Ocean territory grouped by +private sector and public + +00:07:57.333 --> 00:08:00.500 align:middle line:79% position:50% size:43% +sector, and I +can compare that. + +00:08:00.566 --> 00:08:04.533 align:middle line:79% position:50% size:63% +So if I want to see all +data about sectors, I can + +00:08:04.600 --> 00:08:05.866 align:middle line:85% position:50% size:43% +just press there. + +00:08:05.933 --> 00:08:10.866 align:middle line:79% position:50% size:70% +And I see, okay, I have over +4,000 pages on the private + +00:08:10.933 --> 00:08:16.966 align:middle line:79% position:50% size:68% +sector and over 3,000 +pages on the public sector. + +00:08:17.033 --> 00:08:22.399 align:middle line:79% position:50% size:68% +And now let's say I want to +compare data of the public + +00:08:22.466 --> 00:08:24.833 align:middle line:79% position:50% size:48% +sector onto +specific countries. + +00:08:24.899 --> 00:08:31.066 align:middle line:79% position:50% size:75% +So instead of drilling down, I +want to group by this -- a way + +00:08:31.133 --> 00:08:34.466 align:middle line:79% position:50% size:68% +to group this kind of data +using another category and, + +00:08:34.533 --> 00:08:39.033 align:middle line:79% position:50% size:58% +say, okay, let me group +this, for instance, + +00:08:39.100 --> 00:08:40.933 align:middle line:85% position:50% size:55% +France and Luxembourg. + +00:08:41.000 --> 00:08:42.200 align:middle line:85% position:50% size:13% +Okay. + +00:08:42.266 --> 00:08:49.366 align:middle line:79% position:50% size:70% +So I can now see the data on +public sector bodies only. + +00:08:49.433 --> 00:08:53.433 align:middle line:79% position:50% size:63% +So we're comparing sector +public grouped by two + +00:08:53.500 --> 00:08:55.966 align:middle line:79% position:50% size:43% +countries, France +and Luxembourg. + +00:08:56.033 --> 00:09:01.433 align:middle line:79% position:50% size:63% +And in this visualization +I can have them both at + +00:09:01.500 --> 00:09:03.500 align:middle line:85% position:50% size:35% +the same time. + +00:09:03.566 --> 00:09:06.033 align:middle line:79% position:50% size:43% +Let me move +to another class. + +00:09:06.100 --> 00:09:07.566 align:middle line:85% position:50% size:28% +Let me see. + +00:09:07.633 --> 00:09:10.833 align:middle line:85% position:50% size:75% +We can see tags or categories. + +00:09:10.899 --> 00:09:14.766 align:middle line:79% position:50% size:58% +Here we have, once +again, randomized data. + +00:09:14.833 --> 00:09:20.533 align:middle line:79% position:50% size:73% +Don't forget if I want to see +how health -- how the health + +00:09:20.600 --> 00:09:24.666 align:middle line:79% position:50% size:73% +sector has been doing over +time, I can just, once again, + +00:09:24.733 --> 00:09:30.000 align:middle line:85% position:50% size:75% +select this on the bar charts. + +00:09:30.066 --> 00:09:31.566 align:middle line:85% position:50% size:15% +Sorry. 
+ +00:09:31.633 --> 00:09:34.899 align:middle line:79% position:50% size:65% +And then I can go and do +-- and select the timeline + +00:09:34.966 --> 00:09:40.733 align:middle line:79% position:50% size:68% +option, which will show me +for every month that I have + +00:09:40.799 --> 00:09:48.700 align:middle line:79% position:50% size:70% +collected data on this -- on +websites or mobile apps that + +00:09:48.766 --> 00:09:55.299 align:middle line:79% position:50% size:68% +have been tagged as health, +I can see the data here. + +00:09:55.366 --> 00:10:00.033 align:middle line:79% position:50% size:75% +Let me go back to tag and show +you how I can compare, for + +00:10:00.100 --> 00:10:04.299 align:middle line:79% position:50% size:70% +instance, data on the health +and the media sector. + +00:10:04.366 --> 00:10:08.766 align:middle line:79% position:50% size:70% +So here I can use the +comparison and health, which + +00:10:08.833 --> 00:10:11.566 align:middle line:79% position:50% size:50% +is the one that I've +selected it already. + +00:10:11.633 --> 00:10:12.899 align:middle line:85% position:50% size:50% +Previously selected. + +00:10:12.966 --> 00:10:16.299 align:middle line:79% position:50% size:75% +And now I can also select +media, and the comparison page + +00:10:16.366 --> 00:10:20.000 align:middle line:79% position:50% size:75% +will show me, okay, the number +of pages that have been + +00:10:20.066 --> 00:10:22.799 align:middle line:79% position:50% size:75% +evaluated and the passed +assertions, failed assertions, + +00:10:22.866 --> 00:10:24.500 align:middle line:79% position:50% size:65% +can't tell assertions, +inapplicable, and untested + +00:10:24.566 --> 00:10:28.633 align:middle line:79% position:50% size:68% +assertions for both sectors +side by side so that I + +00:10:28.700 --> 00:10:32.266 align:middle line:85% position:50% size:43% +can compare them. + +00:10:32.333 --> 00:10:37.100 align:middle line:79% position:50% size:48% +Let me just present +one more feature. + +00:10:37.166 --> 00:10:39.833 align:middle line:79% position:50% size:58% +If I go -- this is +specific at this level. + +00:10:39.899 --> 00:10:44.700 align:middle line:79% position:50% size:65% +If I go to the application +website and I change my + +00:10:44.766 --> 00:10:48.133 align:middle line:79% position:50% size:63% +visualization from the +assertions to the success + +00:10:48.200 --> 00:10:54.500 align:middle line:79% position:50% size:68% +criteria, now I can have +access to specific tests of + +00:10:54.566 --> 00:10:57.466 align:middle line:79% position:50% size:53% +each success criteria +for one application. + +00:10:57.533 --> 00:11:04.366 align:middle line:79% position:50% size:75% +So we can see here for the +front applications of the fair + +00:11:04.433 --> 00:11:07.600 align:middle line:79% position:50% size:63% +success criteria, and I'm +going to select here this + +00:11:07.666 --> 00:11:10.100 align:middle line:79% position:50% size:45% +application or +website, whatever. + +00:11:10.166 --> 00:11:13.799 align:middle line:79% position:50% size:73% +And I have this -- it's only +in this view that I have this + +00:11:13.866 --> 00:11:16.299 align:middle line:79% position:50% size:40% +option here, the +Details option. 
+ +00:11:16.366 --> 00:11:20.500 align:middle line:79% position:50% size:73% +So if I go to the Details +page, now I have -- okay, for + +00:11:20.566 --> 00:11:23.733 align:middle line:79% position:50% size:75% +this application or website, I +know that passed the success + +00:11:23.799 --> 00:11:27.733 align:middle line:79% position:50% size:70% +criteria, and I can see the +different tests that tell me + +00:11:27.799 --> 00:11:31.366 align:middle line:79% position:50% size:73% +that, and I see that it +failed this success criteria. + +00:11:31.433 --> 00:11:33.500 align:middle line:79% position:50% size:58% +And, once again, I have +access to the different + +00:11:33.566 --> 00:11:35.766 align:middle line:85% position:50% size:53% +tests where it fails. + +00:11:35.833 --> 00:11:42.333 align:middle line:79% position:50% size:75% +So we can, in fact, go all the +way up from a grouping of the + +00:11:42.399 --> 00:11:51.399 align:middle line:79% position:50% size:75% +data by continent to specific +assertions of one application. + +00:11:51.466 --> 00:11:57.833 align:middle line:79% position:50% size:65% +So this shows us the range +that we can have just by + +00:11:57.899 --> 00:12:03.299 align:middle line:79% position:50% size:70% +visualizing data that's made +available from this -- + +00:12:03.366 --> 00:12:05.366 align:middle line:85% position:50% size:43% +from these tools. + +00:12:05.433 --> 00:12:10.433 align:middle line:79% position:50% size:60% +So let me go back to the +slides and just -- I've + +00:12:10.500 --> 00:12:13.666 align:middle line:85% position:50% size:63% +finished my presentation. + +00:12:13.733 --> 00:12:17.933 align:middle line:85% position:50% size:75% +So all of this is open source. + +00:12:18.000 --> 00:12:22.466 align:middle line:85% position:50% size:70% +You can play with this demo. + +00:12:22.533 --> 00:12:23.733 align:middle line:85% position:50% size:43% +It's available at + +00:12:23.799 --> 00:12:37.133 align:middle line:79% position:50% size:60% +www.qualweb.di.fc.ul.pt/ +placm/test. + +00:12:37.200 --> 00:12:42.866 align:middle line:79% position:50% size:75% +The source code will be really +soon available at this rep. + +00:12:42.933 --> 00:12:44.833 align:middle line:79% position:50% size:65% +It's not there yet because +we're moving this to a + +00:12:44.899 --> 00:12:50.766 align:middle line:79% position:50% size:60% +dedicated repository for +this -- for these tools. + +00:12:50.833 --> 00:12:58.799 align:middle line:79% position:50% size:75% +So you will be able to find it +at GitHub.com/carlosapaduarte. + +00:12:58.866 --> 00:12:59.733 align:middle line:85% position:50% size:23% +That's -- + +00:12:59.799 --> 00:13:10.000 align:middle line:79% position:50% size:80% +C-A-R-L-O-S-A-P-A-D-U-A-R-T-E/pl +acm. Okay? + +00:13:10.066 --> 00:13:13.966 align:middle line:79% position:50% size:73% +And if you have any questions +or any problems getting this + +00:13:14.033 --> 00:13:17.033 align:middle line:79% position:50% size:48% +to run, you can get +in touch with me at + +00:13:17.100 --> 00:13:24.566 align:middle line:85% position:50% size:55% +caduarte@campus.ul.pt. + +00:13:24.633 --> 00:13:26.933 align:middle line:85% position:50% size:35% +And thank you. + +00:13:27.000 --> 00:13:29.233 align:middle line:79% position:50% size:60% +I think I've gone a +little bit over my time. + +00:13:29.299 --> 00:13:31.700 align:middle line:85% position:50% size:55% +Sorry for that, Shadi. 
+ +00:13:31.766 --> 00:13:33.000 align:middle line:79% position:50% size:60% +>> SHADI ABOU-ZAHRA: No. +You're spot on. + +00:13:33.066 --> 00:13:34.200 align:middle line:85% position:50% size:45% +Thank you, Carlos. + +00:13:34.266 --> 00:13:38.833 align:middle line:79% position:50% size:68% +And as I told folks earlier +on, this is a bit more + +00:13:38.899 --> 00:13:40.166 align:middle line:85% position:50% size:45% +aspirational part. + +00:13:40.233 --> 00:13:45.000 align:middle line:79% position:50% size:70% +So just to recap, we looked +at, you know, in the morning + +00:13:45.066 --> 00:13:49.566 align:middle line:79% position:50% size:73% +of these many, many small +rules, right, that test very, + +00:13:49.633 --> 00:13:51.866 align:middle line:85% position:50% size:50% +very specific parts. + +00:13:51.933 --> 00:13:54.866 align:middle line:79% position:50% size:60% +And we looked at +so-called implementation + +00:13:54.933 --> 00:13:56.166 align:middle line:85% position:50% size:45% +reports for these. + +00:13:56.233 --> 00:13:58.566 align:middle line:79% position:50% size:55% +So how do we collect +information from these + +00:13:58.633 --> 00:14:00.799 align:middle line:79% position:50% size:48% +different rules and +the tools and the + +00:14:00.866 --> 00:14:02.866 align:middle line:85% position:50% size:65% +methodologies implemented? + +00:14:02.933 --> 00:14:07.466 align:middle line:79% position:50% size:70% +And then if we can aggregate +that on a, say, monitoring + +00:14:07.533 --> 00:14:11.899 align:middle line:79% position:50% size:73% +body level or, you know, even +more widely, can we collect + +00:14:11.966 --> 00:14:15.566 align:middle line:79% position:50% size:45% +all this data, all +this information? + +00:14:15.633 --> 00:14:19.100 align:middle line:79% position:50% size:63% +I understand this seems a +bit more futuristic, but, + +00:14:19.166 --> 00:14:22.033 align:middle line:79% position:50% size:43% +you know, why not +think about that. + +00:14:22.100 --> 00:14:26.066 align:middle line:79% position:50% size:55% +Public websites are +required to be public. + +00:14:26.133 --> 00:14:29.799 align:middle line:79% position:50% size:75% +The monitoring of their +reports is required to happen. + +00:14:29.866 --> 00:14:33.333 align:middle line:79% position:50% size:73% +If we can provide this open +data, we can look more -- and + +00:14:33.399 --> 00:14:34.966 align:middle line:79% position:50% size:43% +this is not about +shaming people. + +00:14:35.033 --> 00:14:38.299 align:middle line:79% position:50% size:73% +This is about analyzing where +are there issues, which + +00:14:38.366 --> 00:14:43.299 align:middle line:79% position:50% size:70% +particular sectors need more +support, need more training + +00:14:43.366 --> 00:14:48.233 align:middle line:79% position:50% size:75% +and advocation, and, you know, +member states or countries or + +00:14:48.299 --> 00:14:50.700 align:middle line:85% position:50% size:48% +even organizations. + +00:14:50.766 --> 00:14:54.966 align:middle line:79% position:50% size:75% +It could be on an organization +level could use that to + +00:14:55.033 --> 00:15:00.233 align:middle line:79% position:50% size:75% +improve their actions and what +they're doing to make sure + +00:15:00.299 --> 00:15:02.200 align:middle line:85% position:50% size:73% +accessibility is implemented. 
+ +00:15:02.266 --> 00:15:05.633 align:middle line:79% position:50% size:58% +So the key word here +is EARL, the evaluation + +00:15:05.700 --> 00:15:08.333 align:middle line:85% position:50% size:50% +and report language. + +00:15:08.399 --> 00:15:13.366 align:middle line:79% position:50% size:73% +This is a format to write +down test results, basically. + +00:15:13.433 --> 00:15:18.399 align:middle line:79% position:50% size:75% +I'll read out the GitHub URL +for that, where you can go and + +00:15:18.466 --> 00:15:22.633 align:middle line:79% position:50% size:53% +see that syntax for +writing such results. + +00:15:22.700 --> 00:15:31.566 align:middle line:79% position:50% size:70% +So it's github.com/w3c/earl, +E-A-R-L, + +00:15:31.633 --> 00:15:35.299 align:middle line:85% position:50% size:78% +Evaluation And Report Language. + +00:15:35.366 --> 00:15:36.966 align:middle line:85% position:50% size:13% +Okay. + +00:15:37.033 --> 00:15:39.266 align:middle line:79% position:50% size:43% +We had a question +for you, Carlos. + +00:15:39.333 --> 00:15:41.933 align:middle line:79% position:50% size:75% +My master's student is +studying the Portuguese public + +00:15:42.000 --> 00:15:44.799 align:middle line:79% position:50% size:43% +libraries website +accessibility. + +00:15:44.866 --> 00:15:47.799 align:middle line:79% position:50% size:65% +He has full list of public +libraries' addresses. + +00:15:47.866 --> 00:15:51.066 align:middle line:79% position:50% size:75% +Can he import the list to test +data browser and do a summary + +00:15:51.133 --> 00:15:53.533 align:middle line:85% position:50% size:60% +analysis of all of them? + +00:15:54.899 --> 00:15:58.500 align:middle line:85% position:50% size:63% +>> CARLOS DUARTE: No. No. + +00:15:58.566 --> 00:16:03.899 align:middle line:79% position:50% size:75% +Because this, too, does not do +the accessibility evaluation. + +00:16:03.966 --> 00:16:10.066 align:middle line:79% position:50% size:73% +If you have the accessibility +evaluation of all of those + +00:16:10.133 --> 00:16:15.233 align:middle line:79% position:50% size:70% +libraries in EARL format, as +Shadi was just mentioning, + +00:16:15.299 --> 00:16:23.666 align:middle line:79% position:50% size:75% +yes, then you can import those +reports and use this tool to + +00:16:23.733 --> 00:16:27.433 align:middle line:79% position:50% size:65% +go over and do the -- +what kind of analysis that + +00:16:27.500 --> 00:16:29.133 align:middle line:85% position:50% size:38% +you want to do. diff --git a/content-images/wai-about-wai b/content-images/wai-about-wai deleted file mode 120000 index b21447a546c..00000000000 --- a/content-images/wai-about-wai +++ /dev/null @@ -1 +0,0 @@ -../_external/resources/wai-about-wai/content-images/wai-about-wai \ No newline at end of file diff --git a/pages/about/accessibility-statement.md b/pages/about/accessibility-statement.md new file mode 100644 index 00000000000..10ef43b10af --- /dev/null +++ b/pages/about/accessibility-statement.md @@ -0,0 +1,73 @@ +--- +title: "Accessibility Statement for WAI Website" +lang: en + +permalink: /about/accessibility-statement/ +ref: /about/accessibility-statement/ + +footer: > +
This statement was updated on 1 September 2020. It was generated using the W3C Accessibility Statement Generator Tool.
+Contributors: Shawn Lawton Henry, Eric Eggert, Shadi Abou-Zahra, Judy Brewer, and Coralie Mercier.
+--- + +{::nomarkdown} +{% include box.html type="start" h="2" title="Summary" class="full" %} +{:/} + +Web Accessibility Initiative (WAI) website content posted since May 2018 meets Web Content Accessibility Guidelines (WCAG) 2.1 at Level AA. Older content meets earlier versions of WCAG. Parts of the content meets Level AAA. + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + + +Web Accessibility Initiative (WAI) takes the following measures to ensure accessibility of the WAI website:
+The Web Content Accessibility Guidelines (WCAG) standard defines requirements to improve accessibility for people with disabilities. It defines three levels of conformance: Level A, Level AA, and Level AAA. "Fully conforms" means that the content meets all of the WCAG requirements at the specified Level without exceptions.
+WAI website content posted since May 2018 fully conforms to WCAG 2.1 Level AA. It partially conforms to Level AAA.
+Older content conforms to earlier versions of WCAG, for example, WCAG 2.0. Most of the older content is archived and will not be updated.
+ +We welcome your feedback on the accessibility of the WAI website. Please let us know if you encounter accessibility barriers.
                                                    +We usually respond to accessibility feedback sent by e-mail within 3-5 business days. Sometimes it can take up to 3 weeks. If you do not receive a reply to e-mail within 10 business days, you can contact Shawn Henry at +1-617-395-7664.
                                                    
+ +The WAI website is designed to be compatible with assistive technologies and the last two versions of major browsers.
+In Internet Explorer 10, 11, and older browsers, some aspects of the website may not display optimally. The website is not designed for Internet Explorer 9 and earlier versions.
+ +The WAI website relies upon the following technologies for conformance with WCAG 2.1:
+The following technologies are used to improve accessibility and the user experience for everyone:
+Several videos on the WAI website use YouTube. As an alternative, the videos are provided as MP4 files on a W3C server.
+The WAI website links to GitHub for providing input on resources and documents. As an alternative, an e-mail address is included for providing input.
+ +WAI assessed the accessibility of the WAI website by self-evaluation.
+ +This Accessibility Statement is approved by Shawn Lawton Henry, WAI Outreach Coordinator.
diff --git a/pages/about/contacting.md b/pages/about/contacting.md new file mode 100644 index 00000000000..25da8f4a2db --- /dev/null +++ b/pages/about/contacting.md @@ -0,0 +1,86 @@ +--- +title: Contacting WAI +nav_title: "Contacting" +lang: en + +permalink: /about/contacting/ +ref: /about/contacting/ + +feedbackmail: wai@w3.org +class: tight-page +--- + +{::options toc_levels="2" /} + +{::nomarkdown} +{% include_cached toc.html type="start" title="Page Contents" %} +{:/} + +- TOC is created automatically. +{:toc} + +{::nomarkdown} +{% include_cached toc.html type="end" %} +{:/} + +## Press Inquiries + +For press inquiries regarding WAI or web accessibility, please send e-mail to [shawn@w3.org, w3t-pr@w3.org, wai@w3.org](mailto:shawn@w3.org,w3t-pr@w3.org,wai@w3.org?subject=press%20request-accessibility) + +For general W3C press inquires, see [How to contact W3C, Press](/Consortium/Contact#press). + +## Technical, Implementation, and other Support Questions + +Many questions are addressed in documents on the WAI site. For example, +[Understanding WCAG 2.0](/TR/UNDERSTANDING-WCAG20/) provides extensive +guidance on making Web sites accessible, including the intent of +guidelines and success criteria; how they help people with different +disabilities, browser and assistive technology support notes, examples, +and resources. The [WAI site map](/sitemap/) lists all documents and +[search](/search/) is available to look for specific information. + +Current Web accessibility issues are discussed through the WAI mailing +lists, which are archived and visible to the public. The [WAI Interest +Group](/WAI/IG/) is a public forum for discussion of issues relating to +Web accessibility. If you have a question that might be relevant to the +WAI IG list, you can: + +- Search the [WAI IG list archives](http://lists.w3.org/Archives/Public/w3c-wai-ig/) to see if it has already been addressed sufficiently +- Read the [Mailing Lists](/WAI/about/groups/waiig/#mailinglist) section of the WAI IG page +- [Subscribe to the Discussion List](/WAI/about/groups/waiig/#subscribing-and-unsubscribing-to-the-discussion-list) and + post appropriate questions + +## Feedback on Specific Documents + +To submit comments on a specific document: + +- Many WAI resources are on GitHub and you can create issues and + submit pull requests. Look for the GitHub links near the bottom of + the page. +- WAI resource documents have a specific e-mail address for submitting + comments, which is usually **at the bottom of the document**. This + is usually:Deliverable | +Description | +Status | +Editors' Draft | +Formal Version | +
---|---|---|---|---|
ARIA Core Specifications | +||||
Accessible Rich Internet Applications (WAI-ARIA) 1.1 | +This specification provides an ontology of roles, states, and properties that set out an abstract model for accessible interfaces and can be used to improve the accessibility and interoperability of Web Content and Applications. This information can be mapped to accessibility frameworks that use this information to provide alternative access solutions. Similarly, this information can be used to change the rendering of content dynamically using different style sheet properties. The result is an interoperable method for associating behaviors with document-level markup. | +In Working Drafts; Approaching readiness for CR | +Editors' draft of Accessible Rich Internet Applications (WAI-ARIA) 1.1 | +Formal published version of Accessible Rich Internet Applications (WAI-ARIA) 1.1 | +
                                                    Core Accessibility API Mappings 1.1 | +Describes how user agents should map WAI-ARIA features to platform accessibility APIs. Other Accessibility API Mappings specifications depend on and extend this Core specification for specific technologies, including native technology features and WAI-ARIA extensions. | +In Working Drafts; Approaching readiness for CR | +Editors' draft of Core Accessibility API Mappings 1.1 | +Formal published version of Core Accessibility API Mappings 1.1 | +</tr>
                                                    
Accessible Name and Description: Computation and API Mappings 1.1 | +Describes how user agents determine names and descriptions of accessible objects from web content languages and expose them in accessibility APIs. | +In Working Drafts; Approaching readiness for CR | +Editors' draft of Accessible Name and Description: Computation and API Mappings 1.1 | +Formal published version of Accessible Name and Description: Computation and API Mappings 1.1 | +
Accessible Rich Internet Applications (WAI-ARIA) 2.0 | ++ | Work to begin after ARIA 1.1 wraps up | ++ | + |
ARIA Extensions | +||||
Digital Publishing WAI-ARIA Module 1.0 | +Defines a WAI-ARIA module encompassing an ontology of roles, states and properties specific to the digital publishing industry. This allows an author to convey user interface behaviors and structural information to assistive technologies and to enable semantic navigation, styling and interactive features used by readers. It is expected this will complement HTML5. | +In Working Drafts | +Editors' draft of Digital Publishing WAI-ARIA Module 1.0 | +Formal published version of Digital Publishing WAI-ARIA Module 1.0 | +
Graphics WAI-ARIA Module 1.0 | +Defines a WAI-ARIA module of roles, states, and properties specific to web graphics. These semantics allow an author to convey user interface behaviors and structural information to assistive technologies and to enable semantic navigation, styling and interactive features used by readers. It is expected this will complement HTML5 and SVG2. | +In Editors' Drafts, FPWD anticipated soon | +Editors' draft of Graphics WAI-ARIA Module 1.0 | +Formal published version of WAI-ARIA Graphics Module 1.0 | +
Cognitive WAI-ARIA Module 1.0 | ++ | Work not yet begun | ++ | + |
WAI-ARIA Interaction Module 1.0 | ++ | Work not yet begun | ++ | + |
Accessibility API Mapping Extensions | +||||
                                                    Digital Publishing Accessibility API Mappings 1.1 | +This document describes how user agents map digital publishing markup to platform accessibility APIs based on the Core Accessibility API Mappings specification for user agents. | +In Editors' Drafts | +Editors' draft of Digital Publishing Accessibility API Mappings 1.0 | +Formal published version of Digital Publishing Accessibility API Mappings 1.0 | +</tr>
                                                    
HTML Accessibility API Mappings 1.1 | +This document describes how user agents map HTML5.1 [[!HTML51]] elements and attributes to platform accessibility API roles, states and properties on a variety of platforms, based on the Core Accessibility API Mappings specification for user agents. This document is designed to leverage these core mappings for the HTML5.1 host language. | +In Working Drafts | +Editors' draft of HTML Accessibility API Mappings 1.0 | +Formal published version of HTML Accessibility API Mappings 1.0 | +
                                                    SVG Accessibility API Mappings 1.1 | +This document describes how user agents map SVG2 markup to platform accessibility APIs based on the Core Accessibility API Mappings specification for user agents. | +In Working Drafts | +Editors' draft of SVG Accessibility API Mappings 1.0 | +Formal published version of SVG Accessibility API Mappings 1.0 | +</tr>
                                                    
ARIA Support Resources | +||||
                                                    Requirements for Accessible Rich Internet Applications 1.1 | +This roadmap describes the problem, the W3C specifications that will be used to correct the problem, and the timeline for the new specifications. | +Work not yet begun | +Editors' draft of Requirements for Accessible Rich Internet Applications 1.1 | + |</tr>
                                                    
                                                    WAI-ARIA Authoring Practices 1.1 | +Provides recommended approaches to create accessible Web content using WAI-ARIA roles, states, and properties to make widgets, navigation, and behaviors accessible. Also describes considerations that might not be evident to most implementors from the WAI-ARIA specification alone. | +In Working Drafts | +Editors' draft of WAI-ARIA Authoring Practices 1.1 | +Formal published version of WAI-ARIA Authoring Practices 1.1 | +</tr>
                                                    
                                                    Using WAI-ARIA in HTML | +Practical guide for developers on how to add accessibility information to HTML elements using WAI-ARIA 1.1 in HTML 5.1, which especially helps with dynamic content and advanced user interface controls developed with Ajax, HTML, JavaScript, and related technologies (see the markup sketch after this table). | +In Working Drafts | +Editors' draft of Using WAI-ARIA in HTML | +Formal published version of Using WAI-ARIA in HTML | +</tr>
                                                    
User Context Properties | +||||
                                                    User Context Properties 1.0 | +Defines a set of preferences that users can choose to expose to web applications. Web applications can use this information to optimize the presentation without a requirement to target a specific device, operating system, or locale. | +Handover from IndieUI WG not yet completed | + | + |</tr>
                                                    
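                                                    The deliverables above repeatedly refer to roles, states, and properties and to the computation of accessible names and descriptions. As a rough, purely illustrative sketch (the widget, element ids, and text below are invented for this example and are not taken from any of the drafts listed), these WAI-ARIA attributes are added to ordinary HTML markup roughly like this:

                                                    ```html
                                                    <!-- A disclosure button: role comes from the native element, the state from aria-expanded. -->
                                                    <button type="button" aria-expanded="false" aria-controls="adv-settings">
                                                      Advanced settings
                                                    </button>

                                                    <!-- The controlled region: a role plus a property (aria-labelledby) that supplies its accessible name. -->
                                                    <div id="adv-settings" role="region" aria-labelledby="adv-settings-heading" hidden>
                                                      <h3 id="adv-settings-heading">Advanced settings</h3>

                                                      <!-- The accessible name of the input comes from the label element;
                                                           aria-describedby contributes the accessible description. -->
                                                      <label for="proxy">Proxy server</label>
                                                      <input id="proxy" type="text" aria-describedby="proxy-hint">
                                                      <p id="proxy-hint">Leave empty to connect directly.</p>
                                                    </div>
                                                    ```

                                                    User agents implementing the Core, HTML, and other Accessibility API Mappings listed above are then responsible for exposing the role, the current value of aria-expanded, and the computed name and description of these elements through the platform accessibility APIs.
                                                    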
+ Specification + | ++ Status (22 September 2023) + | ++ Projection + | +||||
---|---|---|---|---|---|---|
+ FPWD + | ++ CRD + | ++ CRS + | ++ PR + | ++ Rec + | +||
+ * WAI-ARIA 1.2 [WAI-ARIA Editors' Draft] + | ++ W3C Recommendation + | ++ 19 July 2018 + | ++ 8 Dec 2021 + | ++ | + 28 March 2023 + | ++ 6 June 2023 + | +
+ * Core Accessibility API Mappings [Core-AAM Editors' Draft] + | ++ Living Standard + | ++ 19 July 2018 + | ++ 22 November 2022 + | ++ Q3 2023 + | ++ | + |
+ * Accessible Name and Description: Computation and API Mappings 1.1 [AccName-AAM Editors' Draft] + | ++ Living standard -- Waiting on changes needed for ARIA 1.3 + | ++ 11 July 2019 + | ++ | + Q2 2023 + | ++ | + |
+ HTML Accessibility API Mappings [HTML-AAM Editors' Draft] + | ++ Living standard + | ++ 7 April 2015 + | ++ | + Q2 2023 + | ++ | + |
+ Digital Publishing WAI-ARIA module [DPub ARIA Editors' Draft] + | ++ | + 26 August 2021 + | ++ | + Q3 2023 + | ++ | + |
+ Digital Publishing WAI-ARIA Accessibility API Mappings [DPub-AAM Editors' Draft] + | ++ Living standard + | ++ 30 November 2021 + | ++ | + Q3 2023 + | ++ | + |
+ SVG Accessibility API Mappings [SVG-AAM Editors' Draft] + | ++ Living standard + | ++ 26 February 2015 + | ++ | + Q3 2023 + | ++ | + |
Date: Updated 13 March 2024.
+--- + +{::nomarkdown} +{% include box.html type="start" class="" %} +{:/} + +**Accessibility: Essential for some, useful for all.** + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::nomarkdown} +{% include box.html type="start" h="2" title="Brief" class="full" %} +{:/} + +**[WAI develops…](/resources/)** + +- guidelines that are widely regarded as the international standard for web accessibility +- support materials to help understand and implement web accessibility +- free online resources, through international collaboration + +**[WAI welcomes…](/about/participating/)** + +- participation from around the world +- volunteers to review, implement, and promote guidelines +- dedicated participants in Working Groups + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::nomarkdown} +{% include_cached toc.html type="start" title="Page Contents" %} +{:/} + +- TOC is created automatically. +{:toc} + +{::nomarkdown} +{% include_cached toc.html type="end" %} +{:/} + +## W3C WAI + +The World Wide Web Consortium ([W3C](https://www.w3.org/about/)) is an international [public-interest non-profit organization](https://www.w3.org/news/2022/w3c-to-become-a-public-interest-non-profit-organization/) where Member organizations, a full-time staff, and the public work together to develop Web standards. Web Accessibility Initiative (WAI) is a key aspect of W3C's work for the benefit of humanity. + +This information extends the [W3C Mission](https://www.w3.org/mission/) (vision, identity, design principles, strategic goals) to address accessibility specifics. + + +Date: Updated 29 April 2024.
+Editor: Shawn Lawton Henry.
+Developed with the Education and Outreach Working Group (EOWG).
+--- + +{::nomarkdown} +{% include box.html type="start" h="2" title="Summary" class="full" %} +{:/} + +This page explains how you can participate in WAI work, for example: +* implement, promote, and review guidelines and other accessibility resources +* share your input on drafts via GitHub or e-mail +* occasionally participate in e-mail discussions +* contribute significant time to participate in a Working Group or Task Force + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::nomarkdown} +{% include toc.html type="start" title="Page Contents" %} +{:/} + +- TOC is created automatically. +{:toc} + +{::nomarkdown} +{% include toc.html type="end" %} +{:/} + +## Introduction + +The W3C Web Accessibility Initiative (WAI) provides an international +forum for collaboration between industry, disability organizations, +accessibility researchers, government, and others interested in web +accessibility. + +We encourage individuals and organizations around the world to +participate in activities that help improve accessibility of the web. + +_After you read this page_, if you have any questions about getting involved with WAI or would like more information, please send specific questions to [Shawn@w3.org with CC to wai@w3.org](mailto:shawn@w3.org?cc=wai@w3.org&subject=Participating%20in%20WAI). + +## Getting News of Drafts for Review {#news} + +Draft documents for review are listed on the WAI home page and [News page](https://www.w3.org/WAI/news/). + +You can subscribe to get announcements via e-mail, Atom/RSS feed, or Twitter from [Get WAI News](https://www.w3.org/WAI/news/subscribe/). + +## Participating in Guidelines and Groups + +See [**How WAI Develops Accessibility Guidelines through the W3C +Process: Milestones and Opportunities to +Contribute**](http://www.w3.org/WAI/intro/w3c-process.php) to learn how +WAI works through a process designed to: + +- ensure broad community input, and +- encourage consensus development. + +### Reviewing Guidelines and Documents + +WAI welcomes comments on documents at any time. Comments are handled +differently depending on the stage of document development. See the +["Feedback on Specific Documents" section of Contacting +WAI](http://www.w3.org/WAI/contacts#feedback-on-specific-documents) for where to send +comments. + +One of the best times to comment on developing documents is during a +formal period for public review. To get notified of open calls for review, see via e-mail, Atom/RSS feed, or Twitter from [Get WAI News](https://www.w3.org/WAI/news/subscribe/). + +### Participating in Community Groups {#CGs} + +W3C Community Groups cover a variety of topics. **Anyone can join Community Groups**, after getting a free W3C account and signing the W3C Community Contributor License Agreement. Of the long [list of Community Groups](https://www.w3.org/community/groups/), here are some that are actively seeking contributors: +* [ACT Rules CG](https://www.w3.org/community/act-r/) +* [ARIA and Assistive Technologies CG](https://www.w3.org/community/aria-at/) +* [Cognitive Accessibility CG](https://www.w3.org/community/coga-community/) +* [Immersive Captions CG](https://www.w3.org/community/immersive-captions/) +* [Low Vision CG](https://www.w3.org/community/low-vision/) + +### Participating in Interest Groups {#IG} + +The [WAI Interest Group (WAI IG)](/WAI/IG) is for general discussion and +feedback on all areas of WAI's work. Most of the interaction within WAI +IG is through the public mailing list. 
+ +### Participating in Working Groups {#WGs} + +Anyone can join a WAI Working Group public mailing list, or read the +public list archives. + +There are specific criteria for formally joining Working Groups, +including requirements for participation and contribution. See the +Working Group pages below to find what group best fits your interests +and to get information on participation. + +- **[EOWG](/WAI/EO)** — The Accessibility Education and Outreach Working Group develops awareness, training, and implementation resources supporting web accessibility. **See [Participating in EOWG](/WAI/EO/participation)**. +- **[AG WG](/WAI/GL)** — The Accessibility Guidelines Working Group develops guidelines for web pages, web applications, and other web content. **See [Participating in AG WG](/WAI/GL/participation)**. +- **[APA](/WAI/APA/)** — The Accessible Platform Architectures (APA) Working Group reviews W3C's specifications for accessibility support and develops technical support materials. **See [Participating in APA WG](/WAI/APA/participation)**. +- **[ARIA](/WAI/ARIA/)** — The Accessible Rich Internet Applications Working Group develops a suite of accessible rich internet applications (ARIA) resources, and accessible APIs and mappings. **See [Participating in ARIA WG](/WAI/ARIA/participation)**. + +## Sponsoring WAI + +WAI's work is supported in part by sponsorship from industry, +disability, and government organizations interested in contributing to +WAI's efforts to make the web more accessible. Please see the list of +current sponsors and information on WAI sponsorship at [Sponsoring +WAI]({{"/about/sponsoring/" | relative_url }}). + +## Promoting and Implementing Web Accessibility + +### Promoting Awareness + +- Share links to the "[Perspectives Videos](https://www.w3.org/WAI/perspective-videos/)" that show the impact of accessibility and the benefits for everyone in a variety of situations. +- Encourage people to take the [Digital Accessibility Foundations - Free Online Course](https://www.w3.org/WAI/fundamentals/foundations-course/) +- Cover web accessibility in [presentations and + training](http://www.w3.org/WAI/train) to web developers, designers, + managers, and others. Use the [curricula](https://www.w3.org/WAI/curricula/) to develop courses. +- Look around the W3C WAI website to find other [accessibility resources](https://www.w3.org/WAI/resources/) for specific [roles](https://www.w3.org/WAI/roles/) that you want to share on social media, within your organization, and with others. + +### Implementing Guidelines + +- Use the [Web Content Accessibility Guidelines + (WCAG)](/standards-guidelines/wcag/) documents to help make your website + accessible. +- If you develop authoring tools—any software or service that people + use to create or modify web content, including content management + systems—implement the [Authoring Tool Accessibility Guidelines + (ATAG)](/standards-guidelines/atag/). +- If you develop web browsers, media players, assistive technologies, + or other user agents—implement the [User Agent Accessibility + Guidelines (UAAG)](/standards-guidelines/uaag/). + +### Encouraging Accessibility + +- [Encourage authoring tools](/WAI/impl/software) to meet + [ATAG](/standards-guidelines/atag/) by directly contacting + vendors and requesting increased accessibility support in future + versions, and by purchasing tools that provide the best support for + accessibility. 
+- Encourage web browsers, media players, assistive technologies, and + other user agents to meet + [UAAG](/standards-guidelines/uaag/). +- Encourage organizations to [adopt an accessibility + policy](/WAI/impl/pol) for their organization that defines their + commitment to web accessibility. +- Encourage websites to be accessible, for example, provide [feedback + on inaccessible websites and inaccessible web products + carefully](http://www.w3.org/WAI/users/inaccessible.html); it is + usually more productive to start with a positive encouraging tone, + rather than a negative critical tone. diff --git a/pages/about/projects/easy-reading/index.md b/pages/about/projects/easy-reading/index.md new file mode 100644 index 00000000000..3b867b8c3e1 --- /dev/null +++ b/pages/about/projects/easy-reading/index.md @@ -0,0 +1,72 @@ +--- +# Translation instructions are after the "#" character in this first section. They are comments that do not show up in the web page. You do not need to translate the instructions after #. + +title: "Easy Reading" # Do not translate "title:". Do translate the text after "title:". +nav_title: "Easy Reading" # A short title that is used in the navigation +lang: en # Change "en" to the translated language shortcode +last_updated: 2019-09-09 # Put the date of this translation YYYY-MM-DD (with month in the middle) +permalink: /about/projects/easy-reading/ # Add the language shortcode to the end; for example /fundamentals/accessibility-intro/fr +ref: /about/projects/easy-reading/ # Do not change this +feedbackmail: wai@w3.org +--- + +![An EU Project]({{ "/content-images/about/eu.svg" | relative_url }}){:.right.small} + +{::nomarkdown} +{% include_cached box.html type="start" h="2" title="Introduction" class="full" %} +{:/} + +This page provides information on the **Easy Reading** project, a European Commission (EC) co-funded project, Horizon 2020 Program (780529). See also the [Easy Reading Project Website](https://www.easyreading.eu/). + +{::nomarkdown} +{% include_cached box.html type="end" %} +{% include toc.html type="start" title="Page Contents" %} +{:/} + +{::options toc_levels="2" /} + +- The TOC will replace this text. +{:toc} + + +{::nomarkdown} +{% include toc.html type="end" %} +{:/} + +## Project Work Packages +{:#deliverables} + +- **Work package 1: IPAR-UCD Methodology Development and Application** - Development of an Inclusive Participatory Action Research approach for Research & Development (R&D) in the sense of design based research. +- **Work package 2: Innovative Interface Concepts** - Development of adaptive personalized user interface concepts for people with cognitive and learning disabilities. +- **Work package 3: Tracking and Reasoning** - Development of tracking engines that uses sensors (eye-tracker, MS Kinect etc.) and user input (touches, mouse movement, keystrokes) to detect and understand the situation the user faces (e.g. attention, stress, confusion). +- **Work package 4: Profiling and Knowledge Base** - Development of a module for individual user profiles and knowledge base and a system that is capable of matching requests for content simplification. +- **Work package 5: Automatic Translation/Annotation/Adaptation of Content and User-Interaction** - Implementation of services to feed the interface with alternative format and display of content. +- **Work package 6: Easy Reading Service Framework** - Development of a customizable and adaptable framework for adaptable digital content that is robust, flexible and validated. 
                                                    +- **Work package 7: Guidelines for Cognitive Web Accessibility** - Introduce and explain the use of W3C standards relevant to the project and relay feedback and learnings from the project into the W3C standardization process, to improve resources on people with cognitive and learning disabilities. +- **Work package 8: Privacy, Safety, Ethics** - Ensuring that issues related to privacy, safety and ethics are identified, avoided and mitigated in all parts and aspects of the project. +- **Work package 9: Dissemination, Exploitation & Communication** - Promoting adoption, because without adoption the social and business potential cannot be actualised. +- **Work package 10: Management** - Ensuring the effectiveness and the efficiency of the envisaged activities, pursuing the project’s objectives according to the time schedule, budget and the quality standards established, and harmonizing the project results. + +## About the Project +{:#about} + +Easy Reading is a Research and Innovation Action project, co-funded by the European Commission (EC) under the Horizon 2020 program (Grant Agreement 780529). The project is coordinated and led by the Johannes Kepler University (JKU), with the following partners: + +- Johannes Kepler University (JKU), Austria, project coordinator +- Athena, Israel +- Dart, Sweden +- Funka, Sweden +- KI-I, Austria +- PIKSL, Germany +- Texthelp, UK +- TUD, Germany +- W3C, France + +Easy Reading aims to enable people with cognitive disabilities to better read, understand, and use web pages. It develops a software tool that supports people with cognitive and learning disabilities to personalize the content, such as by simplifying the text and adjusting its presentation. + +The role of the W3C Web Accessibility Initiative (WAI) in this project is two-fold: + +- Inform the project on relevant developments in W3C standardization, such as work on [personalization](https://www.w3.org/WAI/personalization/), [pronunciation](https://www.w3.org/WAI/pronunciation/), and requirements and supplemental guidance on people with [cognitive and learning disabilities](https://www.w3.org/WAI/cognitive/); +- Relay relevant feedback and learnings from the project, such as implementation experiences and user needs from working with the community, into W3C standardization and resource development.
                                                    
diff --git a/pages/about/projects/index.md b/pages/about/projects/index.md new file mode 100644 index 00000000000..cd0fdc950d8 --- /dev/null +++ b/pages/about/projects/index.md @@ -0,0 +1,30 @@ +--- +title: "WAI Projects" +nav_title: "Overview" +lang: en +permalink: /about/projects/ +ref: /about/projects/ +feedbackmail: wai@w3.org +class: tight-page +--- + +## Current Projects + + * [WAI-CooP](/about/projects/wai-coop/) + * [WAI-Guide](/about/projects/wai-guide/) + * [WAI-Core Ford](https://www.w3.org/WAI/about/projects/wai-core-ford/) + * [WAI-Core 2015, 2020](https://www.w3.org/WAI/CORE2015/) + +## Previous Projects + + * [WAI Expanding Access](https://www.w3.org/WAI/expand-access/) + * [Easy Reading](https://www.w3.org/WAI/easy-reading/) + * [WAI-Tools](/about/projects/wai-tools/) + * [WAI-Core 2010](https://www.w3.org/WAI/CORE/) + * [WCAG TA](https://www.w3.org/WAI/WCAGTA/) + * [WAI-DEV](https://www.w3.org/WAI/DEV) + * [WAI-ACT](https://www.w3.org/WAI/ACT/) + * [WAI-AGE](https://www.w3.org/WAI/WAI-AGE/) + * [WAI-TIES](https://www.w3.org/WAI/TIES/) + * [WAI-DA](https://www.w3.org/WAI/WAIDA/) + * [WAI-DE/WAI-TIDE Final report](https://www.w3.org/WAI/TIDE/FR2.htm) diff --git a/pages/about/projects/wai-coop/Title_of_the_Contribution_-_Submission_for_WAI-CooP_and_W3C_APA_Symposium.html b/pages/about/projects/wai-coop/Title_of_the_Contribution_-_Submission_for_WAI-CooP_and_W3C_APA_Symposium.html new file mode 100644 index 00000000000..64de81ce74f --- /dev/null +++ b/pages/about/projects/wai-coop/Title_of_the_Contribution_-_Submission_for_WAI-CooP_and_W3C_APA_Symposium.html @@ -0,0 +1,58 @@ + + + + +[Note to submitters: Keep the word count under 1500 words. Do not add to or change the document style; styles will be removed prior to publication. Ensure that your contribution is accessible (markup headings, paragraphs, lists, tables, citations, acronyms, and other document structures, and provide text alternatives for illustrations, graphics, and other images and non-text content; please refer to How To Meet WCAG 2.1 for more information); inaccessible contributions can not be accepted. Do not remove the following paragraph:]
+This is a submission for the WAI-CooP and the W3C APA Symposium on Research and Development Questions in Digital + Accessibility. It has not yet been reviewed or accepted for publication. + + +
[Please enter at least three keywords.]
+ +[This section should describe the research question about digital accessibility that the authors are trying to solve.]
+ +[On which previous work does this topic root on?]
+ +[Major obstacles found and possible strategies to address the problem.]
+ +[The main outcomes, lessons learned, and mistakes made.]
+ +[What future perspectives does this work open?]
+ +[This section is optional. Citations are made as in the "References" section below.]
+ +[Please use the following format for any citations and references.]
+Personalisation, Accessible Media, User Profile
+ +Selecting an accessible media service is often a binary option – either on or off, where one option is supplied to all no matter the degree or need. Audience requirements are very different and range from 100% loss of a sense to occasional need for + assistance. Furthermore, accessible media services continue to only address sight and sound assistance, which does not help participation for those with reduce motor functions or with understanding or learning difficulties - often more than one condition + is present leading to co-morbidity.
+ +Developers need to understand and incorporate the wide range of requirements for people with a range of abilities. A ‘one-size-fits-all’ approach can be the easiest option to implement, rather than developing different options for the same website, + application or audiovisual media, for people with a range of abilities. Solutions are often not scalable when applied to platforms with a range of accessibility options.
+ +The role of the ITU Audio Visual Accessibility Group is to investigate and suggest options and solutions that can be applied to any form of media no matter how produced, distributed, or reproduced.
+ +Researchers have explored ways to adapt the same content to meet the needs of different users based on a user profile. The SUPPLE project at University of Washington, IBM Web Adaptation technology and AVANTI browser are notable examples, mostly working + for people with different range of visual and motor impairment.
+ +ISO-FDIS 9241-129 published a standard on managing user profiles for software individualization in 2010. The European Union Virtual User Modelling and Simulation (EU-VUMS) cluster took an ambitious attempt to publish an exhaustive set + of anthropometric, visual, cognitive, auditory, motor and user interface related parameters for adapting man-machine interfaces of automobile, consumer electronics, audio-visual media and so on.
+ +ISO/IEC 24756 published the concept of Common Access Profile for accessible and assistive devices.
+ +ITU Focus Group on Smart Cable Television Technical Report. ITU Study Group Work
+ +ITU-T Study Group 9 carries out studies on the use of telecommunication systems in the distribution of television and sound programs supporting advanced capabilities such as ultra-high definition and 3D-TV. This work covers the use + of cable and in conjunction with other groups, hybrid networks – primarily designed for the distribution of television and sound programs to the home – as integrated broadband networks to provide interactive voice, video and data services, including + Internet access.
+ +ITU-T Study Group 16 is responsible for studies relating to ubiquitous multimedia applications, multimedia capabilities for services and applications for existing and future networks, including the coordination of related studies across + the various ITU-T SGs. It is the lead study group on multimedia video coding, systems, and applications; multimedia applications; telecommunication/ICT accessibility for persons with disabilities; human factors; intelligent transport system (ITS) + communications; digital and e-health; Internet Protocol television (IPTV) and digital signage; and e-services.
+ +ITU-R Study Group 6 looks at programme production and content exchange between distributors and broadcasters and the delivery of broadcasts to users. Information on the progress of current work on some of the techniques and technologies + related to accessibility being applied to programme production is described in ITU-R Reports (including ITU-R BT.2420 and ITU-R BT.2447). These now extend from the original Sight and Sound based accessible technologies to include haptic and cognitive + related studies and trials. All three Study Groups have formed joint Rapporteur Group (IRG-AVA) to combine expertise and ideas with representatives from interest groups, administrations, and industry.
+ +To make media accessible to all, the entire chain from script to reproduction device must understand and contribute to the Quality of Experience of the intended audience. To cater for a diversity of needs, simple on/off systems are a very coarse option and do not take advantage of the capabilities and features that new and developing technologies offer.
+ +While many of these options are not directly targeted at accessibility, technologies such as object-based media offer ideal opportunities to personalise media to a user’s needs. The challenge is to define the “language” that describes the options available through a personal common user profile that can be applied to any device. The standardisation of the form of such user profiles is an important objective.
+ + + +Figure 1. Ranges of human needs for sharing the media experience
+ +As computer-based devices evolve, people who need assisted access to information through multiple devices will use different sets of applications and software platforms. Ideally, an accessibility service should be available on any device and application + irrespective of underlying hardware. Responsive design of applications and web pages can be considered as an example of automatic adaptation of layout based on screen size and platform of deployment.
+ +This requires information about a user’s needs to personalize the content with respect to their range of abilities. A user profile can be defined as a representation of a user model, while a user model can be defined as a machine-readable description of a user. After creation of a single user profile, a model can,
+ +Examples of interface adaptation across multiple devices and platforms using a common user profile format are shown in Figure 2.
+ + + +Figure 2. Example of interface personalization using common user profile format
+ +Profiles can save details such as color contrast, font size, inter-element spacing of icons, etc., which can be applied across IP and traditional smart TVs, desktop and laptop computers, smartphones and low-end mobile devices.
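+ +As a rough illustration only (the field names and structure below are our own assumptions, since the common profile vocabulary is precisely what remains to be standardised), such a profile could be expressed as structured data and merged with each device's defaults locally, so personal data never has to leave the user's own devices. A minimal Python sketch:

# Hypothetical common user profile; every field name here is illustrative,
# not part of any existing standard.
user_profile = {
    "text": {"minimum_font_size_pt": 18, "font_family": "sans-serif"},
    "colour": {"contrast_ratio_min": 7.0, "invert_colours": False},
    "captions": {"enabled": True, "size_pt": 22, "background_opacity": 0.8},
    "audio_description": {"enabled": True},
    "layout": {"icon_spacing_px": 24},
}

def apply_profile(profile: dict, device_defaults: dict) -> dict:
    """Merge the user's accessibility preferences over a device's defaults.

    The profile stays on the user's own device; only the resulting rendering
    settings are applied locally, so no personal data needs to be shared.
    """
    settings = {category: dict(values) for category, values in device_defaults.items()}
    for category, prefs in profile.items():
        settings.setdefault(category, {}).update(prefs)
    return settings

# The same profile can drive very different devices.
tv_settings = apply_profile(user_profile, {"text": {"minimum_font_size_pt": 12}})
phone_settings = apply_profile(user_profile, {"layout": {"icon_spacing_px": 8}})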
+ +Developing content that can exploit a Common User Profile requires authors to explicitly state the font size and color contrast for general content as well as for closed captions or subtitles.
+ +Storing and sharing information about users always brings security risks and the possibility of unintended use not authorized by the end user. Security and privacy must be fully integrated into all aspects of a Common User Profile when gathering personal information. Sharing of the actual data is not necessary: the personalization algorithms can run on user profiles stored on local devices, allowing the user to choose if and how information is shared between their own devices connected to their own secure data services. Standardization enables personalization without the risk of wider sharing of an individual user’s data.
+ +The target outcome is to provide a common data set which can describe how accessible media options are created, exchanged, distributed, and consumed by providing,
+ +The internet and internet connected devices have become a vital part of media. The standardisation of user profiles represents a major step toward greater media personalisation.
+ +The next step would be to automate personalisation rather than rely on direct and possibly repeated human user input. Future work should include the development of AI techniques to make intelligent and adaptive service profiles that will allow the automatic alignment of user needs with the accessibility services available. These needs may change with content type, the age of the user and the environment the user is in at the time (home, public transport, office…). The experience and needs arising from a live sport programme may be very different from a pre-recorded drama or a live entertainment programme.
+ +Today’s accessibility services are limited to subtitles/captions, audio description, signing and audio subtitles/captions. The future will see the development of services based on interaction through different modalities, such as haptic signalling, brain computer interaction (BCI) or gestures, addressing the interaction needs of people with cognitive impairments or reduced motor function. Services will make ever greater use of machine learning and AI.
+ +Our target is to “leave no-one behind!”.
+ +This work has been partially funded by the European Commission funded project Media Verse: A universe of media assets and co-creation opportunities at your fingertips with the grant number 957252. Pilar Orero is part of the TransMediaCatalonia research + group (2017SGR113).
+ +accessible PDFs, conference guidelines, machine learning
+ +Accessing technical information can be a significant challenge for those who have visual impairments. The Royal National Institute for the Blind identifies several key barriers for those who are blind, including access to technical notation and visual resources, difficulty interpreting visual concepts, exclusionary teaching methods, and more (6). While some of these barriers may be unintentional, their impact is worsened by alarming rates of Braille illiteracy, since Braille is an important skill for comprehending words and notation-based concepts like math (1, 10, 15).
+Inaccessible technical content continues to thrive in the most ubiquitous component of academia: research papers. One of the authors of this work is a blind graduate student with an advanced technical degree. A recent experience of writing and submitting a research paper to a top conference uncovered several issues in the way the academic community is approaching accessibility. Most major conferences have guidelines for creating accessible materials; we followed those guidelines, which required a paid version of Adobe Acrobat Pro, only to find the resulting file was very difficult to use.
+There were a couple of reasons for this. First, the file was compiled using Overleaf, which does not produce a properly tagged PDF. Second, even though Acrobat Pro can add tagging after the fact, our preferred reader is Preview on macOS. As Preview does not fully support tagging, the requirement to produce a PDF with line numbers made the resulting file unreadable using Apple's VoiceOver software.
+This led us to ask: why should a blind researcher be unable to use their preferred reader for technical papers? Sighted researchers generally use any PDF tool they wish. More generally, we ask whether best practices are always best. Given that those with disability continue to be underrepresented in technical fields (3), there seems to be a problem. We believe that practices, standards, and software limitations contribute to this problem, and we outline three key issues around these points.
+ +A recent study found that a significant gap remains in employment rates between those with visual impairments or other disabilities and those who report having no disability (12). While schools and companies have been making strides to be more inclusive and promote diverse voices, underlying issues remain (17). One study suggested that young students who are blind are just as likely to want to pursue a technical degree as their peers, but a lack of access to resources and non-inclusive teaching methods can reduce their confidence and ultimately diminish their future prospects for such careers (2, 20). Others point to the lack of developing Braille skills at a young age as negatively impacting literacy and learning (15, 16).
+Those who have a disability continue to fight barriers in technical fields. This includes lack of teacher awareness for providing materials and a general lack of access to resources (20), although + a move to better teaching practices and universal design may help address this issue (7). Studies have found a disparity in the awarding of grant funding for research by those with disability (18). Combined with our own observations of attempting to navigate academic websites, it is understandable how individuals with a disability remain underrepresented in the scientific workforce (3, + 8).
+ +This is a complex topic with many associated challenges. The authors of this paper have encountered many issues with inaccessible websites and requirements that are accidentally exclusive. For this paper we focus specifically on access to technical + materials in the form of research papers as PDFs. We break the problem down to address three key aspects based on our experiences:
+It is important to recognize that the landscape of accessible technologies is constantly changing, so while we hope these issues will mostly be resolved in the future, we focus on more immediate outcomes in the form of practical suggestions for updates to conference and journal policies. We then discuss future directions concerning the limitations of current software and the need for better approaches.
+ +Many guidelines for accessible PDFs rely on tagging (4, 5). Tagged PDFs are desirable, but in light of software limitations, it would be ideal if PDFs emphasized + having a clear document structure instead of relying entirely on tagging. For example, two column layouts are more complex than single column layouts and may be difficult for screen readers to follow without tagging.
+Figure 1 demonstrates these simple attributes on an axis of accessibility. It shows three sample PDF layouts with generic filler text, aligned left to right from inherently less accessible to more accessible. The first sample uses a two column layout with line numbers. While it might be the most efficient for quick reviewing by sighted readers, it is likely to require tagging and need specific software for a blind reviewer. The second sample shows a one column layout with line numbers, which makes scrolling easier for everyone but still must be tagged due to line numbers. The third sample shows a single column paper with no line numbers. We believe it to be the simplest and inherently most readable of the three.
+ +
+
+
Figure 1: A sample of three PDF layouts sorted from left-to-right in terms of ease and accessibility.
+
We recommend that conferences adopt a few simple changes to their policies to reduce the risk of undermining their accessibility goals. First, embrace single column and non-numbered layouts. Alternatively, submission sites could provide a separate upload for this version of the paper. Second, in addition to collecting alternative text in submission fields, encourage the use of more descriptive captions and in-text references. In our experience, many images would be more accessible if the authors focused on making the text description and caption clearer, rather than relying only on alt text. For example, authors should be encouraged to state numbers from tables and graphs in the text where appropriate. Third, authors should try to explain their algorithms descriptively instead of only using technical notation.
+ +In an ideal case, everyone could use Adobe Acrobat Pro to create and read PDFs. Unfortunately, the software is only available as a paid solution, which is inherently inaccessible for those who have monetary constraints, and it may not be everyone's + preferred PDF viewer. We use Apple's Preview. The lack of proper tagging support is an important accessibility issue and must be addressed by Apple, but this is not unique to Preview since many PDF viewers have limited tagging support.
+Additionally, few alternatives to Adobe's software exist for creating tagged PDFs. Microsoft Word can produce them, but LaTeX, which is widely used in technical fields, continues to create inaccessible PDFs, although there are efforts underway to address this limitation (13). Post-processors don't have access to the sophisticated algorithm Adobe uses for analyzing layout and assigning tags, and they usually only identify when tags and alt text are missing.
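+As a minimal sketch of the kind of check that is already possible today (assuming the open-source pikepdf library; this only detects whether a PDF declares logical structure at all, it cannot judge whether the tagging is meaningful to a screen reader):

# Detect whether a PDF declares logical structure (tags).
import pikepdf

def describe_tagging(path: str) -> str:
    # Open the PDF and inspect its document catalog for structure information.
    with pikepdf.open(path) as pdf:
        root = pdf.Root
        has_struct_tree = "/StructTreeRoot" in root
        marked = "/MarkInfo" in root and bool(root.MarkInfo.get("/Marked", False))
    if has_struct_tree and marked:
        return "Tagged PDF: structure tree present and flagged as tagged."
    if has_struct_tree:
        return "Structure tree present, but /MarkInfo does not flag the file as tagged."
    return "No structure tree: screen readers must guess the reading order."

print(describe_tagging("paper.pdf"))  # "paper.pdf" is a placeholder path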
+Even if all software supported tagging, would that address every issue? We argue no. A tagged PDF can still have inaccessible math equations, distracting in-text citations, poor alt text, or other issues associated with the PDF format (19). Sighted individuals often skim over content, having trained themselves to ignore the visual clutter of details like line numbers unless needed. We propose leveraging intelligent interfaces and machine learning to create software that provides blind researchers more of these capabilities.
+Researchers are already developing some of these ideas, including software to create audio representations of visual spaces (9) and pushing for adoption of more extensible tagging in PDFs (14). Microsoft Word provides an option to automatically recommend alt text. We are working to develop a PDF post-processor to integrate several such features. First, our emphasis is on creating a published tagging algorithm. + Second, we seek to reduce clutter by giving the option to remove attributes like line numbers. Third, we wish to provide a summary of text and, potentially, charts. Our goal is to promote the use of machine learning in assistive interfaces to make + content more accessible for everyone.
+In the meantime, we believe great progress can be made through practice. The Easy to Read and Plain Language criteria may be used as guidance for writing descriptions of figures and equations (11). Standards + should be extended to cover more technical material, as recommended by contemporary research (21); for instance, sonification could be used as an alternative presentation of graphics. File uploads could + encourage alternate versions of the same file, and conferences should request paper sources to generate custom outputs, such as HTML and MathML hybrids.
+ +website design, accommodations, memory limitations, artificial intelligence
+ +The many advances we have made toward a more equitable world for blind people have paralleled those made for women's rights and the rights of racial minorities. The difference is that for these other disadvantaged groups, the only change that is needed + to give equity of opportunity is to remove artificial barriers. This obviously ignores the different starting points that past disadvantage caused, but the removal of an artificial barrier is much simpler (at least in theory) than an intrinsic barrier + that requires functional teaching materials to be translated into materials that can be understood by someone who lacks the advantage of seeing pictures and all of the spatial information that most complex systems of thought rely upon. For instance, + mathematics makes use of the position of different parts of the equation to give some idea about how they relate to one another. A huge hurdle has to be overcome to put the entire formula on one line. The Nemeth code for mathematics achieves this, + but the human brain is very limited in how much of that linearized equation it can read and remember at one time (5). Blind humans do not have computer-like random access memory. Reading more involved + equations spanning several Braille lines can be like trying to remember all of the positions of the pieces on a chess board. Yes, it is technically possible to play chess completely in your head, and some people (even the blind) have. However, this + is not a common skill, and demanding savant powers in order to do these things is no excuse to withhold accessibility.
+ +Demanding that a good system for communicating about mathematics be reworked to make it understandable easily in the absence of vision seems like a petty thing to ask. Thankfully, researchers continue to develop in this field through the creation of + such technologies as MathML (6, 7). However, the example leads us to think about the fact that the ADA sets a standard where only "reasonable" accommodations are required; this then begs the question: At what point are the accommodations 'unreasonable'?
+ +This thought experiment taken to an extreme becomes infeasible. If we must accommodate someone who is both deaf and blind, this requires more resources than if the person is just blind or just deaf. We can grow the number of disabilities that our hypothetical + learner has, until we eventually have a brain with no senses or outputs attached (locked-in syndrome) that needs to be taught math via electrodes implanted into its gray matter directly.
+ +Instead of arguing that the adaptations are just 'for' disability, we should broaden the scope of what we consider a disability. Many learning disabilities are associated with sensory input and output (3, + 4). I experience this because sound is a 2-way road of information. If I am required to hear and understand two things at once, I personally don't get either. If I am required to use verbal working memory + and then have to talk, I have the human equivalent of an "All Interrupt Requests" fault. Keeping verbal information in my memory works OK, until I have to verbalize an output, and then the stored information is at odds with the information being sent + to the voice (output). Consider that vision is a sense with no 'output'. Sighted people have no difficulty looking and talking at the same time. The same is not true of hearing and vocalizing, which use the same parts of the brain and interfere with + one another if presented as tasks simultaneously.
+ + +A number of accessibility "features" have been built with little regard for human memory limitations. A picture is considered worth a thousand words. For the first time, I am getting some idea of how polluted the visual world is. Instead of finding + an unlabeled picture, I am confronted with a long 'word salad' problem, and the digestion isn't easy. Pictures and graphs are placed seemingly randomly in the body of text. It is like hearing several conversations that randomly switch from one to + the next. I am carried along by the analog stream of audio coming from the computer's speech like I am in a kayak on a rapidly flowing river and will go at the same speed 'forward' for as long as the speech runs. Transitions happen unceremoniously. + A page might be interrupted by a picture and preceded and followed by repetitive headers and footers.
+ +To make things even weirder, some websites now label pictures with AI systems. When I first tried to use these, I heard a lot of guesswork coming from the computer, with a particularly entertaining + "paramecium" as one of the things the AI ventured as a guess about the water it was seeing. It would produce long lists of words, which I was supposed to filter for the "reality check" and decide what was relevant. Since those early days, the descriptions + have gotten more accurate, but even without the outright wrong guesses, they are now bloated with the 'objective facts' about the picture. Instead of unlabeled images, websites may now be filled with description of every image, e.g. the color of every + person's outfit. I cannot focus on the parts of a person's visage that might be relevant to me. I would like to know things like young/old or even attractive/unattractive (though this might have political implications). Instead, as someone who has + never experienced color, I am told about the style of clothing and the color that the person chose to wear. I can understand that training AI is hard and intensive in both computing and energy. The AI trainer is going for the "low hanging fruit", + rather than asking what the description is supposed to do. The description is needed to bring the user a clear idea of why the picture is there and its relevance to the overall presentation. At no point is my comprehension of a scene improved by knowing + that a "black sleeveless top" is involved.
+ +The problem is that the image is now a clutter of irrelevant information. In the midst of trying to understand something, I am presented with this extraneous information that breaks my flow of thought and comprehension of the work I am doing.
+ +This is similar to the experience of being on a site with random popup ads, except I cannot simply block the descriptions of the pictures, as they might actually have useful information. So to make this analogy more interesting, the popups have vital + information interspersed among the really annoying ads. This is not just for fun; the work I do on the computer is important to me and to other blind people. Upping the ante, these popups aren't just happening when the user is doing fun stuff and + they are accepting that their video is free as long as they watch the ads. No, this is a case where my work is being impeded by random information that adds nothing, until… it says something vitally important. It is like listening to a stream of consciousness + description of someone's dream while trying to get work done.
+ + +It can seem that rather than solving problems, the AI introduces noise. Sighted people overlook this because they can skim the picture, the description, or both. If they were forced to read exactly what was on the page serially, they would place a much + higher premium on the relevance of the information being presented.
+ +Furthermore, this exposes a disconnect between developers / designers and their users. A lot of this confusion could be coming from the cascading style sheets (or CSS) versus the innate document object model (or DOM) of the website. Style sheets are + enabling visual layouts that do not match the underlying structure (2), further complicating the job of the screen reader and the person using it.
+ +While we continue to advocate for best web practices, we should be applying AI to this problem to help struggling users. Specifically, we need a web tool that is able to reduce this clutter and better understand the visual layout without relying on just the document structure. Some websites may already be using AI to generate accessible content or to re-arrange content to improve its readability, but existing markup attributes do not specify whether an image description was generated by an AI or a human.
+ +There are as yet no standards for AI and web accessibility, and the W3C could consider creating new standards to address the use of AI, both by the user and by the developer, to generate descriptive content or otherwise change the presentation of an + app or website. Recent discussion from W3C members has considered the benefits and limitations of AI in this space, with the goal of addressing accuracy and reliability issues through conformance evaluation (1). + Providing more transparency about the use of AI on websites will empower all users, while promoting trust in this new and emerging technology.
+ ++ Digital Accessibility, Accessibility Standards, Web Accessibility Directive, EN301549, WCAG +
+ ++ The adoption of policies and laws at international, European and national levels has placed accessibility at the forefront, requiring the countries of the European Union to ensure that any user can access and interact with any software, web content, documents and hardware regardless of their capabilities. As stated by the United Nations Convention on the Rights of Persons with Disabilities (UNCRPD), accessibility is a right to equal opportunities for all citizens. +
+ ++ In 2010, the UNCRPD was ratified by the European Union and the EU Member + States and consequently a solid legislative framework for improving the + situation of people with disabilities was created. At the legislative + level there are three key directives that regulate accessibility in the + European Union: the + Web Accessibility Directive (2016), the updated + Audiovisual Communication Services Directive (2018) + and the + + European Accessibility Act (2019). The standardisation agencies of each Member State must adapt their laws + to transpose this legislative framework in their countries, in order to + ensure access to media and digital content for all citizens. +
+ In 2015, a UN committee conducted its first review of how the EU complies with the UNCRPD. The Committee noted that the EU institutions needed to make more efforts to fulfill their commitment to make communication accessible to people with disabilities. A decision of the European Ombudsman from December 2018 (Strategic Inquiry OI/6/2017/EA), on how the European Commission ensures that people with disabilities can access its websites, clearly states that there is a need for mandatory accessibility training for all staff working on websites. Hence the creation of the IMPACT project. +
+ ++ The IMPACT project (Inclusive Method based on the Perception of Accessibility and Compliance Testing) aims to define the skills and competencies that an educator or mediator in ICT accessibility should acquire and master for the correct implementation of the harmonised European digital accessibility standard EN 301 549. +
++ It is a strategic partnership bringing together parties from different fields dealing with innovation in higher education, with the main aim of designing, testing and certifying a modular curriculum in digital accessibility for vocational and academic + training that meets the needs of the labor market and society. +
++ The IMPACT project training will prepare for three different work environments: face-to-face, online and relay. This opens up new employment opportunities for people with and without disabilities. The main objectives of the IMPACT project are to: +
+ ++ The recent adoption and entry into force of this legislative framework has highlighted the scarcity of training in digital accessibility outside of the technological field (Oncins et al., 2020). Hence, there is a need to provide training in digital accessibility with a more empathy-driven perspective. +
+ In 2019 the European Disability Forum (EDF) conducted a survey on “Web Accessibility Directive transposition and implementation” with the objective of measuring awareness regarding: transposition, implementation, accessibility statements, feedback and enforcement mechanisms. The reported results stated that 42% of users were not (very) satisfied with the Web Accessibility Directive transposition and implementation, and the majority of new websites (80%) were found not to be accessible. Hence, there is a need to improve training on digital accessibility in general. +
+ In 2019, during the first stage of the IMPACT project, a survey was conducted which reported that, while most participants are familiar or very familiar with the European Web Accessibility Directive and the international standard WCAG 2.0/2.1, the standard EN 301 549, which actually helps and guides organisations to be accessible, remains largely unknown. Hence there is a need to develop specific training for the correct implementation of the European standard EN 301 549. +
+ ++ The IMPACT project survey included the following four proposed competences to be validated by participants: 1) Understanding digital accessibility, 2) Digital accessibility context and digital accessibility services, 3) Implementing digital accessibility and 4) Digital accessibility promotion. The results showed that all competences were rated important or very important. The competence with the highest score was ‘implementing digital accessibility’, followed by ‘understanding digital accessibility’ and ‘digital accessibility promotion’, and finally ‘digital accessibility context and digital accessibility services’. +
+ In a second stage of the project a modular curriculum was created in line with the guidelines of ECQA, the IMPACT partner for job certification. It was validated through five focus groups conducted in three different countries (France, Ireland and Spain). An overview of the course curriculum is described below: +
++ Unit 1 Understanding digital accessibility: +
++ Unit 2 Digital accessibility context and digital accessibility services +
+ +Unit 3 Implementing digital accessibility
+Unit 4 Digital accessibility promotion
+ The IMPACT project partners are currently creating four types of course materials: video lectures, workshops, case studies and quizzes, which will form part of the IMPACT course. +
+ ++ This work has been partially funded by ERASMUS+ IMPACT 2019-1-FR01- KA204-062381 and 2017SGR113. +
+ +Accessibility validation tools, Transparency, Guidelines
+ +Following the adoption of accessibility laws, many public organizations started paying more attention to accessibility guidelines. However, Web accessibility requires constant monitoring of numerous details across many pages in a given site. Thus, to simplify the monitoring, analysis, detection, and correction of website accessibility problems, several automatic and semi-automatic tools have been proposed. Even though accessibility validation is a process that cannot be fully automated [10], automatic tools [1] still play a crucial role in ensuring the accessibility of websites. They help human operators collect and analyse data about the actual application of accessibility guidelines, detect non-compliance, and provide relevant information about addressing the possible problems.
+ +Our group has long experience in tools for accessibility evaluation [2, 7, 8]. We have also participated in the Wadcher EU project, with the aim of developing a set of tools to monitor accessibility at large scale. While discussing with users of such tools and other tool developers, we often noticed that such tools differ in their coverage of accessibility guidelines, in how they interpret and to what extent they are able to support them, and in how they present the results, including errors (and likely errors). Such differences are perceived in different ways by users, are sometimes misinterpreted, and can generate misunderstandings. Better explaining these differences can help create tools that assist web site designers and developers in making informed decisions, and indicate gaps that could be addressed in future versions of these tools (or in new tools). Unfortunately, this issue has not been sufficiently dealt with in previous studies of accessibility tools such as [3, 5, 10]. We have thus introduced [6] the concept of transparency of such tools, as well as some criteria that could be used to analyse it, and provided an initial comparative analysis of four validation tools according to them.
+ +By the transparency of an accessibility validation tool we mean its ability to clearly indicate to its users what accessibility aspects it is able to validate and the meaning of the results generated. The various available tools follow different approaches + to checking accessibility, and have to keep up with the continuous evolution of Web technologies and their use, which imply the need to continuously update their support for the validation of the associated accessibility guidelines. Users are sometimes + not even aware of such differences, and they may become disoriented when they see different results in terms of validation. Thus, it is important to make them more aware of this issue, and provide tool developers with indications for making their + accessibility tools more transparent. To some extent, we face similar issues to those that people are encountering with the increasing deployment of Artificial Intelligence (AI) tools, which often generate various problems for their users since they + do not explain why they are operating in a certain way. Thus, interest in techniques for explainable AI has been increasing in recent times. In this perspective, some researchers have explored the space of user needs for explanations using a question-driven + framework. For example, some authors [9] propose a question bank in which user needs for explainability are represented as prototypical questions users might ask about the AI, such as “Why is this instance given this prediction?”, + “What would the system predict if this instance changes to …?” Some of such questions can still be relevant for accessibility validation tools, even when they do not use AI methods at all.
+ +As stated, transparent tools should enable users to make fully informed decisions based on a clear understanding of how the automatic validation tools work. In particular, in order to be transparent, an automated validation tool should make explicit + the following information: +
We are carrying out work on empirical validation of the issues associated with transparency, in order to define the concept and the associated criteria more precisely and to analyse a broader set of accessibility validation tools according to such criteria, with the ultimate goal of providing recommendations for tool developers to improve the transparency of their tools. In this work we consider people who have used accessibility validation tools from different perspectives: web commissioners (people who mainly decide and manage the content of a Web site), accessibility experts (those who are in charge of actually checking whether an application is accessible), and web developers. We will be happy to share and discuss initial results of this new work at the workshop.
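+ +As a purely illustrative sketch of the kind of machine-readable declaration such criteria point towards (the structure and field names below are our own assumptions, not an existing format), a tool could publish a small capability manifest alongside its results, and its interface could answer coverage questions from it:

# Hypothetical "transparency manifest" for an accessibility validation tool.
# Field names are illustrative; an agreed vocabulary would be part of the
# standardisation work discussed above.
TOOL_MANIFEST = {
    "tool": "ExampleChecker",               # hypothetical tool name
    "guidelines": "WCAG 2.1",
    "supported_success_criteria": {
        "1.1.1": {"techniques": ["H37", "H67"], "results": ["violation", "warning"]},
        "1.4.3": {"techniques": ["G18"], "results": ["violation"]},
    },
    "not_validated": ["1.2.2", "2.4.7"],    # explicitly declared out of scope
    "granularity": ["element", "page", "site"],
}

def explain_coverage(manifest: dict, criterion: str) -> str:
    """State, in plain terms, whether and how a success criterion is checked."""
    supported = manifest["supported_success_criteria"]
    if criterion in supported:
        techniques = ", ".join(supported[criterion]["techniques"])
        return f"{criterion} is checked automatically via techniques {techniques}."
    if criterion in manifest["not_validated"]:
        return f"{criterion} is explicitly not validated; manual review is needed."
    return f"{criterion} is not mentioned by the tool; treat it as unchecked."

print(explain_coverage(TOOL_MANIFEST, "1.4.3"))
print(explain_coverage(TOOL_MANIFEST, "2.4.7"))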
+ +Chatbots, Conversational Agents, Playbook, Wizard-of-oz
+ +Web platforms offer lower operating costs and increased reach. In addition to websites that provide information and support routine processes, a growing number of sites employ chatbots – chat applications in which the human customer engages in conversation + with an automated (nonhuman) representative – to offer interactive support to users. When done “right,” chatbots save organizations time and money while effectively delivering services to the public. +
+W3C’s Web Content Accessibility Guidelines (WCAG), the accessibility guidelines most widely adopted by governments, inform web page design but do not offer clear guidance on many chatbot-specific features. An additional review of 17 other sources (Stanley et al., 2021) yielded 157 unique recommendations for designing and evaluating chatbot accessibility. Nonetheless, there is currently no comprehensive and agreed-upon method for evaluating or recommending accessibility for chatbots that addresses the enormous user base government platforms serve. +
+What should development teams know to produce chatbots that provide functionally equivalent experiences to all users? There is a need to organize existing guidance to support practical applications, identify gaps, and develop novel guidance through + research. +
+ +WCAG is the most widely used web accessibility standard. These guidelines primarily focus on the appearance and functionality of static web pages. For example, they instruct on appropriate color contrast, size, and HTML tags for a button to ensure that + users with various levels of vision loss can perceive and operate the button. Dynamic elements like chatbots are more complex and present additional accessibility challenges, such as supporting consistent, accessible navigation between a chatbot and + its containing webpage. +
+Blogs and peer-reviewed articles from industry and academia offer piecemeal guidance. The body of chatbot accessibility guidance is scattered and lacks substantial empirical support. +
+ +Chatbots have interface features and interactions that are novel among web content and not fully addressed by WCAG. Since chatbots deliver messages in sequence, they raise questions about message content length and pacing for users with different types + of needs. Other issues include how the user should be alerted to new messages as they arrive. For each message, depending on whether the chatbot supports free-text input and/or selection from a set of options, keyboard focus must be positioned carefully + relative to web elements to align with user expectations. Over time, responses accumulate in a sizeable “conversation history,” which the user may review. What are the most accessible ways to present that history and make it easily navigable? Finally, + chatbots are often embedded within a website; chatbot elements must integrate with the larger website experience. +
+Since chatbots interact conversationally, their communication styles affect users. For instance, chatbots exhibiting empathy can improve human mood (de Gennaro et al., 2020). This introduces challenges and responsibilities beyond typical web content, + to ensure chatbots do not negatively impact users emotionally or psychologically. In addition, users vary in their preference for anthropomorphism as exhibited by chatbot language style, name, and imagery. Anthropomorphism’s relationship to emotional + engagement is not fully understood (Blut et al., 2021). +
+These unique challenges are complicated by the popularity of third-party chatbot development platforms that easily allow only certain adjustments. This means that accessibility must be considered when selecting a platform and when customizing to meet + user needs. +
+Chatbot accessibility guidance is available, but provenance and specificity varies widely. Consulting firms publish rules-of-thumb adapting existing WCAG guidelines to chatbots (e.g., BoIA, 2020) but do not identify gaps in guidelines. Studies with + small sets of users yield preliminary chatbot-specific guidance (e.g., Baldauf et al., 2018), but results are not consolidated across studies in a way that can be practically applied. +
+ +Stanley et al.’s (2021) survey of existing chatbot accessibility guidance found 17 sources yielding 157 unique recommendations for designing and evaluating chatbot accessibility. We are integrating these recommendations with existing W3C guidance – + including WCAG success criteria and Cognitive and Learning Disabilities Accessibility Task Force (Coga TF) design patterns – into a Chatbot Accessibility Playbook. +
+The playbook comprises five plays. First and arguably most important is selecting a platform. Development teams have expressed to us how hard it is to improve their chatbot’s accessibility because of platform limitations. Therefore, we offer a checklist + to help establish whether a given platform supports playbook accessibility recommendations. The other plays are: designing the chatbot’s content, designing the chatbot’s interface, integrating the chatbot into the website, and testing the chatbot. + We include a checklist for evaluating against recommendations, as well as a questionnaire to be used in user studies. Because existing guidance is disparate and sometimes abstract, we provide concrete activities to help teams implement each recommendation. +
+We conducted a preliminary user study to illuminate gaps and refine playbook guidance. We implemented a wizard-of-oz chatbot prototype – similar to Böhm et al. (2020) – for a tax administration application and tested it with 12 participants: six with + no recognized disability and six with partial or total vision loss. We aimed to follow playbook recommendations when developing the chatbot. We collected quantitative survey and task performance results and qualitative results from users’ reactions + to using the chatbot and responses to open-ended questions. Preliminary analysis of this small study suggests strengths and weaknesses in our process and in existing guidance. Below we discuss several interesting findings from our preliminary analysis. +
+Users pointed out gaps in our attempts to conform to WCAG in a chatbot context. For instance, the color of chatbot elements blended into certain surrounding webpage colors depending on how the page was scrolled and positioned. Also, we failed to provide + visible text labelling of the “open chatbot” button, which would have helped sighted users interpret the button icon; or a header, which would have allowed users with vision loss to locate the chatbot faster. +
+We discovered the current chatbot paradigm may conflict with some users’ needs while helping others. For instance, distinguishing chatbot versus user messages by positioning them on opposite (left or right) sides of the window can help users process + them visually. However, a participant with partial vision loss focused only on the chatbot’s messages on the left side of the window and was completely unaware that user messages were recorded on the right. +
+Keyboard navigation did not match users’ expectations. Keyboard focus generally remained in the free-text input field at the bottom of the chatbot. When a message arrived with response options, users expected that tabbing down would take them to the + options. Instead, it took them out of the chatbot. We suggest either focus should move automatically to any incoming message, or available response options be positioned after the text input field. Users also liked the idea of “bumpers,” elements + positioned immediately before and after the chatbot to let users know they are about to leave the chatbot. +
+Users had difficulty with sequential message pacing; some reported messages were too fast, others too slow, and some did not immediately realize multiple messages in a row had been received. Though sequential messages were labeled “1 of 3 messages, + 2 of 3 messages,” etc., labeling did not give enough clarity. Users were undecided whether consolidating sequential messages into one long message was the optimal solution. Considering WCAG success criterion 3.2.5: Change on Request, as well as published + guidance from the Coga TF, a more appropriate solution could be to advance messages only when requested, such as via a “read more” button. +
+ +The preliminary user study reinforced the need to test digital products with people with disabilities. It identified open questions for additional research, as well as novel feature ideas to test. While our small study presented one prototype to two + groups of users, future studies could compare two different implementations. For instance, users could be presented with one chatbot that sends multiple sequential messages and another that sends longer consolidated messages. +
+Other tradeoffs to explore include the impact of anthropomorphism (in name, imagery, and conversational style) on people with different kinds of disabilities, as well as the layout of messages and conversation history within the chatbot window. Resource + limitations constrained this preliminary study to participants with vision loss, but chatbot accessibility must be researched with users with diverse disabilities. +
+Finally, our research suggests existing mental models for navigating chatbots are not sufficient for assistive technology users. Blind and vision-impaired users are still establishing an understanding of what navigating a chatbot should be like and + frequently referenced form-type interactions during think-aloud sessions. Future work should discover current mental models of chatbot interaction, establish if there is a current paradigm that satisfies accessibility needs and, if not, create and + test possible new models of chatbot interaction. As we converge on a model of interaction that supports diverse users, we must leverage existing user expectations while paving the way for new interaction opportunities. +
+ +This work was funded by MITRE’s Independent Research and Development Program. © 2021 The MITRE Corporation. All rights reserved. Approved for public release. Distribution unlimited 21-3138. +
+We thank the Visually Impaired and Blind User Group (VIBUG) for their invaluable assistance with recruiting. +
+ +>> CARLOS DUARTE: So I think we can start now. Hello, everyone. We are delighted that you are able to join us in this Shape the Future: Research and Development Questions in digital accessibility symposium. And we are looking forward to a set of stimulating + presentations and discussions.
+ +My name is Carlos Duarte. And on behalf of the whole organizing team I would like to offer you our warmest welcome. +
+ +Let me just take a moment to say a big thank you to the wonderful team that made all this possible: my colleague here at the University of Lisbon, Leticia, and a bunch of people from the W3C Accessible Platform Architectures Working Group, Joshua, Jason, Scott, Becky. And, of course, Judy Brewer from the W3C Web Accessibility Initiative.
+ +Before we get going, just a couple of important reminders. By taking part in this symposium, you agree to follow the W3C code of ethics and professional conduct and ensure to promote a safe environment for everyone in this meeting. You can find more + about the code of ethics and professional conduct at https://www.w3.org/consortium/cepc.
+ +Also, this session is not being video recorded but is being transcribed. The transcription will be posted on the symposium website later. If you object to it being transcribed, we ask you to refrain from commenting.
+ +I would also like to take this opportunity to thank the European Commission that co-funds the WAI-CooP project. And to thank the Accessible Platform Architectures Working Group and the Research Questions Task Force for all the support in preparing the scientific program. And to thank OpenConf, who kindly provided us with their paper submission and reviewing platform.
+ +Let me describe some of the logistics for this meeting. Audio and video are off by default. Please turn them on only when requested and turn them off again when no longer needed. As you might be aware, in each session we have a period for discussions + after all the session presentations. During the discussion you can enter your questions in the chat feature of Zoom or you can use the raise hand feature.
+ +We will be monitoring both channels. And time allowing, we will try to address your questions during the discussions.
+ +You can also use the chat feature to report any technical issues you are experiencing. Hopefully you won't experience any. But Pascale will be monitoring the chat for these issues. If during the seminar your connection drops, please do your best to try to reconnect. If it is the whole meeting that gets disrupted, we will try to resume the meeting for a period of up to 15 minutes. If we are unsuccessful, we will contact you with further instructions by e-mail.
+ +But for the presenters, if you drop, we will wait for one or two minutes for you to try to join. If you are not successful, then we will move to the next presentation. And you will have the opportunity to resume your presentation when you are able to + reconnect. +
+ +So as I have mentioned before, this symposium is one of the results of the WAI-CooP project. You can learn more about this project at https://www.w3.org/WAI/about/project/WAI-CooP. WAI-CooP started in January of this year and will run for two more years. This means that we will have the opportunity to host two more symposia, one in each of the project years.
+ +The main goal of WAI-CooP is to support the implementation of international standards for digital accessibility. It aims to achieve this goal from multiple perspectives. It will provide different overviews of accessibility-related resources, including accessibility checking tools and training resources.
+ +It will develop actions like this one today to promote collaboration between research and development players. And it is already creating opportunities for the stakeholders in this domain to exchange their best practices through, for example, a series of open meetings and an online forum.
+ +And as I have just mentioned, this is the first of three symposia that will be organized by the WAI-CooP project.
+ +For the first symposium, our main goals are to foster the exploration of the state of the art in digital accessibility, not only from a research perspective, but also considering all the different practices and development challenges. In particular, our initial efforts have identified differences in the focus of the different communities working in this domain.
+ +This is not a problem per se. But we believe it will be very useful for everyone to become more aware of the challenges that the other communities are facing. And with W3C being a member of the WAI-CooP consortium, it became even more relevant in our efforts to understand how research can guide and be guided by standards. And this will certainly frame some of our discussions today.
+ +Of course, today we also expect to be able to identify topics for the next two symposia. And since I have been going for a bit now, and we are nearing the time to start our first session, let me just finish by introducing you to today's agenda. Our next session will focus on the scope of accessibility guidelines and standards. There will be two presentations introducing us, from different perspectives, to this topic: one on accessibility evaluation and the other on one type of document.
+ +The following session focuses on the challenges of accessibility education and how we can promote better education. Between the second and third sessions, we will have a small coffee break. And the third session will follow with an increasingly relevant topic, and that's AI and accessibility. We will be discussing how AI can contribute, and what limitations it still faces or creates.
+ +Finally, before the closing session, we will try to summarize the main points that were made in the three sessions, and frame them in the overall objectives of the symposium.
+ +And we can even start a bit ahead of time I would say, a couple of minutes before time. So for the first session I'm delighted to introduce our Moderator, Josh O'Connor from the W3C.
+ +>> JOSHUE O'CONNOR: Thank you. I'm happy to be here with you all to discuss some excellent research and to talk about future opportunities in the area of accessibility, which is really, really interesting and exciting. For a brief introduction, my name is Josh O'Connor. I work on emerging web accessibility technology. I work with the Accessible Platform Architectures Working Group and the Research Questions Task Force, where we look at these interesting and emerging areas to figure out how we can best serve people with disabilities.
+ ++ I am happy to introduce this first session. We are going to have two presentations. And the first is called the Transparency of Automatic Web Accessibility Evaluation Tools. And that's going to be from Fabio Paterno, who is the research director at CNR + ISTI, where he is leading the laboratory on human interfaces and information systems. +
+ ++ One of the topics he has been addressing for a long time is the design of tools for supporting accessibility validation. So it is a very important topic. Fabio, I'm going to let you get started with your presentation. And I'm going to give you a call two minutes before it is over so you can wind up on time. We will keep everything, you know, concise, brief and on point. And then we will have a discussion and questions at the end of the two presentations. +
+ +So Fabio, over to you.
+ +>> FABIO PATERNO: Thank you. Can you see my screen?
+ +>> JOSHUE O'CONNOR: Yes, we can.
+ ++ >> FABIO PATERNO: Okay. So good afternoon. We are happy to be here to discuss with you the transparency of automatic web accessibility evaluation tools. We would like to explain why we think this topic is particularly important. We would like to propose some design criteria that can be used by tool designers and developers to better address this aspect, and to discuss the current state of the art. +
+ ++ So let's say that our research laboratory has experience in designing tools for accessibility evaluation. And the last result is the MAUVE++ tool. So this is a web application, publicly available, which supports validation against WCAG 2.1, in particular a number of its success criteria and techniques. It also has a community of about 2,000 registered users, so they can perform validation of multiple pages and so on. +
+ ++ We also have recent experience in the European Wadcher project, where we were involved in the design phase of such tools. I also teach a university course in which one of the parts is dedicated to using various tools for performing accessibility validation, which provided, you know, a number of interesting observations during such exercises. So let's say, generally speaking, there is an increasing interest in public organizations about accessibility guidelines because of national legislation, the European Web Accessibility Directive, and similar initiatives. On the other hand, performing accessibility validation becomes more and more complex, since, you know, for example, WCAG 2.1 has more success criteria and many more techniques. +
+ ++ So, of course, we are aware that accessibility validation is something that cannot be fully automated. It is also clear that, given this increasing complexity, some automatic support can help in collecting the relevant data, detecting non-compliance, and providing relevant information for addressing the possible problems. +
+ ++ Indeed, I mean, many tools have been put forward in recent years. So if we look at the W3C list of tools, we can find 159 tools. And, of course, such tools have to address the challenge of continuously evolving guidelines, and the technologies that we use for implementing websites are changing continuously as well. +
+ ++ So let's say that it became important to try to better understand what the user experience is when using such tools. So we had some direct observations in various contexts, interviews, questionnaires. And so we started to collect some information about issues that people often encounter. They say that often, I mean, there are not the right expectations, especially at the beginning, about such tools. +
+ ++ So maybe at the beginning several people think, oh, with this tool I will be able to press a button and I will get a complete validation. And then they soon get frustrated because, I mean, they get results which sometimes are not clearly explained. So to some extent we are in contact with problems similar to those that people encounter with AI technology. Recently there is a lot of interest in explainable AI. People get some results and don't understand why they get such results. +
+ ++ And in particular, one aspect that often emerges is that when they try different tools they often get different results. If they look at them more carefully, they understand that the tools cover different areas. They may also interpret the same techniques differently. And such differences can really generate a number of misunderstandings, also considering that there are different types of users who look at accessibility from different perspectives. +
+ ++ So let's just say what we mean by transparency. We mean the ability of a tool to actually indicate to its users, in a precise manner, what accessibility aspects it is able to validate, and also to present explanations of the information that the tool provides regarding the results of such validation. +
+ ++ And we think that if this concept is fully supported, there would be many benefits for users, because they can make more fully informed decisions about the accessibility validation by having a clearer understanding of how such a tool works. So they can also better understand why in some cases tools provide different results. And it can also be useful for tool developers, because they can better identify the gaps that they probably still need to address in the future, or new tool developers can think about, you know, new tools that are able to cover such gaps. In general we notice this is understudied. There is not a lot of attention, even if it is a common concern for many people. +
+ ++ So let's say we would like to propose some design criteria that can be useful to support this concept. So first of all, we think that all the validation tools should be able not only to indicate the standard they support, but also, in a more precise way, what criteria and techniques they actually support. Because in this way people can understand what they can do, but they can also understand what they cannot do. Tools should be clear about what they are not able to validate. +
+ +>> JOSHUE O'CONNOR: Sorry. You have two minutes please.
+ ++ >> FABIO PATERNO: Another aspect is how they report accessibility results, following a commonly recognized vocabulary, for example one of the existing standards. It is important that they indicate results at different granularities of accessibility, from small elements up to the accessibility of the entire website. They should be able to indicate results that are meaningful for people with different levels of knowledge of the technologies. They should also be able to indicate which pages they were able to validate or not. So we started an analysis; there is no time to report on it. And we also have some empirical data from a survey and a user test. +
+ ++ And we can say that there is still a lot of work to do. We focus in particular on tools that are publicly available and free. And these kinds of design criteria are not yet fully supported. So we really think that there is a need to improve the tools. And also, from the user's side, I think it is important to be aware that accessibility is a complex concept. So, for example, when public organizations want to address it, they should dedicate time and effort to actually addressing these types of issues. +
+ ++ So we say that we are also planning to write in our paper where we wanted to detail our analysis. We can have some documentation that can be helpful for people who are interested in this kind of aspect. +
+ +So if I understood the time is over. So I show
>> JOSHUE O'CONNOR: That was great. Thank you very, very much. Thank you for your presentation. Perfectly on time. We look forward to discussing it after the next presentation. So anyone with any questions you may have for Fabio, or items to discuss, please put them into the chat and we can go over them at the end of the session.

So that's wonderful. The next presentation is Are Best Practices Best? Making Technical PDFs More Accessible. And this is led by Seth Polsley. Seth is a Ph.D. student at Texas A&M University, and he works in the Sketch Recognition Lab, which applies machine learning and AI. So Seth, are you ready to go?
>> SETH POLSLEY: Yes. Can you see my screen?

>> JOSHUE O'CONNOR: We can. Excellent.
>> SETH POLSLEY: All right. Very good. I will go ahead and begin then. Hello, everybody. As you heard, my name is Seth Polsley. I am a Ph.D. student along with my coauthor Amanda Lacy. We are both at Texas A&M University under our research advisor Tracy Hammond. Today I want to talk a little bit about our experience working with PDFs in the technical space and some ideas and practical guidance that came out of that. I will hop into it: Are Best Practices Best? Making Technical PDFs More Accessible. I'm sure that everyone is familiar with the introduction, how access to technical information can be a significant challenge for anyone who has a visual impairment. Part of this is because we have a reliance on figures and charts, and there are also some authors who don't put that information in text form very well.

Also, common formats that are used, like PDF, can often be missing tags, and there is limited software support for tags in PDFs, as we will see. The mathematical nature of scientific publications lends itself to technical notation and complex spatial relationships, which can also lead to difficulty consuming that content.

So we were motivated to look at this specifically from a technical paper and conference perspective. I myself serve on the Accessibility Committee for ACM UI this year. And we have been working with the practices that come out of SIGACCESS to promote more accessible materials for attendees and for our viewers. And there are many, many great guidelines that come out of SIGCHI. They mirror quite closely some of the WCAG guidelines for PDF accessibility and so on.

However, that still doesn't necessarily work with a lot of the software our authors are using. If they use a tool like LaTeX or Overleaf, it might not be generating the right type of document. We have noticed recently how some of the practices and policies can impact accessibility. So, for instance, line numbers are what I'll start with as motivation. Amanda and I were working on a paper that we submitted to SIGCHI a month and a half ago. On the slide here, I have brought up a page which has kind of the CHI format. It has the title, author and then an abstract. And I'm just using text that comes from the first paragraph of A Tale of Two Cities.
So I'm going to have the computer read the opening of this, where you have the author and the first line, "it was the best of times, it was the worst of times." I'm hoping my audio is working so people can hear.

>> It was the worst of times. It was the age of wisdom.

>> SETH POLSLEY: That flows pretty well. But when our paper had line numbering, as requested, this is what Amanda would hear when she ran it through VoiceOver, which is the screen reader she was using. >> Charles Dickens. >> SETH POLSLEY: So that's a snippet of what Amanda was hearing. We quickly realized that this was impossible to parse. The line numbering interferes with the text. And you will notice it didn't even say "it was the best of times, it was the worst of times"; it skipped to a later part of that first paragraph, because the line numbers confused the reader software. So the suggestion was to use Adobe Acrobat Pro. It does an excellent job of applying automatic labeling and ordering, which, if you have a compatible screen reader, will read properly. But we took this document and Amanda ran it through VoiceOver, and she got the following.
So we realized this is a software limitation. It is Apple's fault that we don't have full support, and I use Apple for most of my needs. That got us thinking: why can't she use the software she wants? So we started to talk a little bit about practices that would make it easier for people to use the software they have right now, as we prepare for better support in the future.

So one of the things we did was remove the line numbers. And that allowed Amanda to have a local copy that she felt very comfortable reading.
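To make the reading-order problem concrete, a quick way to approximate the linear text a simple extractor (and, roughly, a screen reader) encounters is to dump a page's raw text order. This is a minimal sketch only, assuming the PyMuPDF library; the file name stands for a hypothetical two-column draft with line numbers, not the authors' actual paper or tooling.

```python
# Minimal sketch: print the raw text order of the first page of a PDF, to see
# whether margin line numbers interleave with the prose.
# Assumes PyMuPDF is installed (pip install pymupdf); "paper.pdf" is a
# hypothetical file name used only for illustration.
import fitz  # PyMuPDF

doc = fitz.open("paper.pdf")
page = doc[0]
# "text" mode returns plain text in the order the PDF stores it, which is
# roughly what naive extraction will encounter.
print(page.get_text("text")[:1000])
doc.close()
```

If the line numbers appear interleaved with sentences in that output, the same interleaving is what confused the screen reader in the example above.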
So part of the larger motivation for this as well is that we noticed it is not just our experience in this particular space. There is a broadly recognized gap between those with visual impairments and those without disabilities. Fewer blind students are pursuing technical degrees relative to their peers. And we know that accessible resources and teacher awareness are improving, but there is still a way to go. We believe that improvements will be realized. We are very optimistic because there are so many wonderful things being developed, but we are looking at the meantime: we are thinking, for conference organizers and accessibility chairs, when we deal with these documents, what are the things we can do to help everyone right now, and how do those lead into ideas for future improvement.

So that goes back to the example I gave earlier, which is encouraging authors to think about what the plain text of their document is like. You can still include imagery and formatting, but think about how you present the material in the paper. I created a little image here that talks about document structure. What I have here is a left-to-right image; it is an axis of the accessibility of the basic structure of a document, the plain text part. If you take away the headings and take away the images, these are just some sample PDF layouts, not even any text really, just placeholder text. Most difficult for average software is things like double column with line numbering. A little easier is single column with line numbering, and much easier is single column with no line numbering.
>> JOSHUE O'CONNOR: Two minutes.
>> SETH POLSLEY: So in talking about these writing practices, we are encouraging authors to think more about the plain text side. When they add imagery, think about clear text and more descriptive captioning. We are suggesting that authors explain algorithms so that all the material you need is in the text as well, and that easy-to-read, plain language criteria are provided for any guidance. I also want to note, in support of what Jason White had pointed out, that there is a lot of documentation pushing for better standards for tactile graphics and sonification approaches. And there is a lot of great material being developed in that area. I'm excited to see those standards.

And we are interested in the research and development side now. Just to finish up with that part of it, we're interested in looking at publishing algorithms that will do content tagging with vision-based approaches, since most of the ones out there are proprietary, as well as developing open source tools that use them. Instead of just detecting if tags are missing, we can provide a free way to locate those and add them. I want to close by opening the discussion about standards and how we can move that forward.

So one way we thought about it: file upload types can be expanded to support more flexible types of submission. Right now when you upload a file, there is a multi-document upload option. Perhaps we could have an alternative form of a file, so we can support a more plain text version. Another thing, about generating text: I found in an old version of WCAG offline browsing guidance about generating a single download of a website. And I thought offline browsing is not useful for the majority of people browsing the Web. However, the idea may still be relevant for document download. So maybe we can have guidance for when you have a source document: how do browsers enable you to download different versions of that source document.

So I wanted to close with that, and I want to go into discussion now. But this is where we were thinking, and I will also mention briefly what we are directing towards SIGCHI and how we can integrate these practices in the future ourselves for authors.
>> JOSHUE O'CONNOR: Thank you for that. Yes, lots of interesting food for thought. Seth, we may open up the discussion part of this session now. And if it is okay with you, I may stick with you because you are up, and then we can hopefully, you know, have some discussion around this.

So one of the things, just to open it, and we do have a question in chat. One of the things that's an issue is the availability of tools that are used to create accessible PDFs. And there is a little bit of a monopoly really on that, to be brief and succinct about it.

I guess, you know, Acrobat Pro is the primary tool to use with these products. Is that something that you think is a challenge, and if so, what type of challenge does it create when these tools, particularly from an accessibility perspective, don't play nice in terms of what accessibility tags or aspects are supported or not?
>> SETH POLSLEY: Yes. Thank you for that question. So definitely the software side is a big part of the challenge. And I'll pick on Apple one more time, because, and full disclosure, Amanda is also here in the discussion, and I believe she will be chiming in as well. She works with Apple as a tester. So we pick on them a little bit because she has submitted bug reports to them. But the software behind Preview, which is their PDF reader, is based on very, very old PDF technology. So a large part of the challenge is that they have to essentially rearchitect the underlying PDF support so they are able to bring better tagging support across Preview, not only to read tagging but to write tagging. If you look at it from the company perspective, it is a massive undertaking to go through a whole system level and replace this type of document structure to add that support in.

And I think that's where Adobe has been successful. I know there are a few other readers; editing is limited to one or two tools. But part of what we want to encourage is more of an open standards approach. So there are open standards for how to use tagging and handle tagging. But we haven't found good open source tools to compile tagging with LaTeX, or to post-process a PDF and just add the labels and the tagging after the fact, without paying for a larger subscription. We are hoping open source can be a way to develop that.
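To make the open source gap concrete: checking whether a PDF even declares tagged structure can already be done with free tools; it is the writing of good tags that remains hard. Here is a minimal sketch, not a tool the speakers endorsed, using pikepdf (an open source Python binding to qpdf); the file name is a placeholder.

```python
# Minimal sketch: report whether a PDF declares itself as tagged.
# Assumes pikepdf is installed (pip install pikepdf); "paper.pdf" is a
# hypothetical path used only for illustration.
import pikepdf

with pikepdf.open("paper.pdf") as pdf:
    root = pdf.Root                      # the document catalog
    mark_info = root.get("/MarkInfo")
    marked = bool(mark_info.get("/Marked", False)) if mark_info is not None else False
    has_struct_tree = "/StructTreeRoot" in root
    print("Declares tagged content:", marked)
    print("Has structure tree:     ", has_struct_tree)
```

Detecting the absence of tags is the easy half; adding correct tags after the fact is exactly the part for which, as Seth notes, good open source tooling is still missing.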
>> JOSHUE O'CONNOR: Right. And thank you, Seth. And also, please raise your hand in the Zoom channel if you want to ask a question. So, to play devil's advocate, PDF is an open standard. They do publish the standard itself, so it is something that can be developed on, built on. What I'm hearing, and what we know in the community, is that access to tools that can generate quality accessible PDFs comes at a premium, which is not, I suppose, what we need in the accessibility community. We should have, as you say, open source tools, or access to tools that are maybe more accessible with a small "a", in terms of not costing money to create PDFs. And that would be something that would be broadly welcomed. There was a question as well about EPUB, Seth, instead of PDF for accessibility.

And also I was thinking of EPUB in the context of what you mentioned about being able to download offline types of resources, and some kind of manifest-type resource that can be used in EPUB. What are your thoughts on how EPUB is handled, in contrast with PDF?
>> SETH POLSLEY: Another great question. I'm not an EPUB expert, but I think an EPUB version would be a great alternative. The format is one of the wonderful things about EPUB; I have found EPUB to have better support. PDF is an open standard, but the document itself is a graphical document. It relies heavily on placements on a screen and is graphical in nature, and the text is kind of a typographical placement. Whereas something like EPUB is text first: thinking about writing more plain text, having a clean document structure, having your text be the first citizen of that document.

>> JOSHUE O'CONNOR: Absolutely. And then with the semantic supports that are inherent within EPUB as well, out of the box. A couple of interesting things; I don't see any other comments. There was a request to return to slide 13 on your deck, please.
>> SETH POLSLEY: Yes.
>> JOSHUE O'CONNOR: Thank you. So a couple of things I would like to add to this. One of the things that was mentioned is, you know, how assistive technology parses PDF content. And you gave a couple of good examples there: differences between one platform and another, how certain things might be output by a particular agent such as a screen reader. I did want to mention some work that's ongoing in the W3C in the area of pronunciation; I will put that in the chat. There is work ongoing to ensure that pronunciation can be handled in the context of any kind of content, but mostly HTML content. The work is mostly on HTML, but it is relevant to other document formats as well, whether PDF or EPUB.

These are issues for users and things they particularly need and require. How should they customize outputs, or have a library of personalized content that needs to be output in a particular way? I see Jason with his hand up to speak to this very thing. So we will take the comment from Jason and then we will move back to the first presentation as well, if that's okay. Jason, over to you.
>> JASON WHITE: Thank you. So I tried to insert into the chat, and I'm not sure whether I succeeded, a link to a specification from the PDF Association which is a standardized way of converting tagged PDF into HTML. And people in the LaTeX community are becoming quite interested in that, because they have a large project underway, with funding, to integrate tagged PDF generation into their typesetting system. What's interesting about it, having reviewed the specification, is that, first of all, one could use it as a way of deriving a readable HTML file, and people who are developing typesetting systems can make use of it as part of their processing arrangements.

But also it seems to me there could be scope for cooperation with some of the digital publishing accessibility efforts that have been working on ARIA digital publishing roles, and some of the mathematics accessibility work that's going on in the W3C at the same time. How do we integrate the different efforts that are under way in such a fashion that the Web-based formats such as HTML and MathML and the ISO standard PDF format are able to fit appropriately into the overall accessibility architecture that we have? Those are important questions. And yes, I agree that more free and open source tools in this area would be a good idea, especially for projects like LaTeX that are entirely operating in that space.
>> JOSHUE O'CONNOR: Thank you, Jason. Seth, I'm curious to hear what you want to say in response to that.
>> SETH POLSLEY: I have a very brief comment; I know you would like to move on to the next paper. That's a really good point. CHI itself has done a good job of encouraging authors to upload the raw source, which is a model for a lot of the conferences moving forward. Authors are sometimes reluctant to release the full source, but from the source we can get much, much richer generation, including the HTML math. I like this idea of post-processing out of PDF. HTML is a much more flexible standard, I would say, than PDF.

>> JOSHUE O'CONNOR: Sounds great. That sounds really, really good. So Jason did good work there joining the dots for Seth and some things for further exploration. Jason is the facilitator of the Research Questions Task Force in the W3C, which is a task force of the Accessible Platform Architectures Working Group. So that's really, really great. Thanks very much, Seth. If it is okay, we will move back to discussing the first paper.
Because we have had two presentations, and the first was from Fabio Paterno. So a couple of things came out of that presentation. One that comes to mind was the issue of transparency and what transparency means in the context of these tools.

Because on reading your research I was trying to think, well, transparency could mean this in one context: what standard is something being tested against, what output is the tool producing. We know from accessibility testing that different tools have different nuances, and they may emphasize, overemphasize or underemphasize certain things. But something that came to my mind was a broader question about realistic expectations from automated accessibility testing. Do you feel that people have a realistic view of what they can expect from these tools? If you would like to speak to that; and if anyone else has any comments or questions, please do put them into the chat or put your hand up.
>> FABIO PATERNO: Yes. So people approach these types of accessibility issues from very different perspectives and viewpoints, with different expectations. So I would say that it is really not yet clear what these kinds of tools can provide. And probably also if you look at the list of tools that the W3C provides, it helps to some extent, but it is not complete. So probably something more specific on the transparency aspect would be helpful in how people can filter the list of tools, so that people can have a clearer understanding, with the tool provider giving precise information about what they can do and what they cannot do. There are some tools that say, oh, we support WCAG 2.1. But unfortunately, WCAG 2.1 means a lot of things. And so, for the right expectations, people need to understand which parts of WCAG 2.1 the tools are really able to support.

And this is true also for the results. Because, for example, there are people who are developers, so they have an expectation of detailed results in order to correct the problems. While there are other people, for example the web commissioner in a public organization, who mainly want to understand: is my website at a sufficient level of accessibility? They are more interested in receiving some kind of metric that provides an overall estimation of the accessibility. And so there is a lack of indication about which metrics can be more relevant and more informative depending on the type of user of such tools.

And there is also the part about dynamic Web page support, because some tools say, okay, this website is fine, but they check the static version of the website. They don't check the version that is rendered in the browser, the one people actually interact with.

And there is sometimes a big difference, for example, when people develop with frameworks such as Angular, and also because the various interactions can really change the content of a Web page a lot. So people don't know exactly what version of the website has been validated, and they need to be more informed about the differences, and also about the differences in terms of the associated results. So there is really a lot of work. It would be nice also if W3C could address some kind of initiative to highlight, for example, these various options that can be available when validating a website with the support of an automatic tool, to make people more aware of them.
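To illustrate the difference between checking static markup and checking what the browser actually renders, here is a minimal sketch, not something from the tools discussed above: it drives a real browser with Playwright, injects the open source axe-core engine, and runs it against the live DOM after client-side scripts have executed. The URL and the CDN address are placeholders, and axe-core covers only a subset of WCAG, so this shows the approach rather than a complete validation.

```python
# Minimal sketch: run axe-core against the rendered DOM of a page, so content
# produced by client-side frameworks (Angular, React, ...) is included.
# Assumes Playwright for Python is installed (pip install playwright,
# then "playwright install chromium"); URL and CDN address are placeholders.
from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.launch()
    page = browser.new_page()
    page.goto("https://example.org/")           # placeholder URL
    page.wait_for_load_state("networkidle")     # let client-side rendering settle
    page.add_script_tag(url="https://cdn.jsdelivr.net/npm/axe-core@4/axe.min.js")
    results = page.evaluate("async () => await axe.run()")
    for violation in results["violations"]:
        print(violation["id"], "-", violation["description"])
    browser.close()
```

Running the same engine against the served HTML alone would miss anything those scripts add or change, which is the gap described above.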
>> JOSHUE O'CONNOR: Thank you. Just briefly, and then we have a couple of comments and questions. W3C's role in this may be slightly different from what people expect. People can create their own tools based on common rule sets; they create a tool on a common standard, which we know is WCAG and its various versions. How they choose to display the outcome of those tests, and how they choose to abstract from them, is up to them. W3C sets the standards that define the rules and tests. So I think there are two different things there. That is a comment I would have.

There is a question from Carlos. He mentions that WCAG 2.2 plans to introduce test rules, and he has put a link in. He asks if you believe this will increase trust in tools that demonstrate conformance to these rules. I don't know if you are familiar with the link he has put in, but maybe you would like to speak about that.
>> FABIO PATERNO: Yeah, you mean the ACT rules. Of course, they are a useful contribution, because at least they provide some guidance about how to check some specific techniques. Because there is also this problem: in some cases some tools say, okay, yes, we support these, but then in practice they support them in a different manner, so they may still provide different results. From this viewpoint I think yes, this can be a useful support, especially for tool developers. Still, there is the issue of providing representations of how the tool actually performs the validation that can be understood also by people who are not technical. Because maybe in some cases this kind of documentation is okay for developers, but less understandable by people who are just users of the Web application and so on. But yes, this can be something that can help in providing more consistent validation.

>> JOSHUE O'CONNOR: Okay. Great. That touches on the need for understanding and potentially training. And there is another comment from Estella: you didn't mention in your presentation the need for training. Do you think that training in the field is needed? And if so, what are the skills that are needed? If you would like to speak about that.
>> FABIO PATERNO: Yes. Unfortunately, what we have noted, because of this legislation, is that many public organizations say, okay, we have to check the accessibility of our website. But then they have some internal people who work on it, who already have limited time available. They may have to work at the end, once the website has been implemented, so there is little room for modification.

Training is really important, but I would say there are two kinds of training. One is to try to let the people who direct such web applications understand that accessibility is not a trivial matter; disability has a lot of implications.

So I think they need to dedicate resources in order to address it.

Then, for the training itself, one aspect we have to consider is that the training should take into account the role of the potential users. Because, again, on the one hand you have developers, people who want to be trained about how to modify the implementation. And then we have the training for, say, more general users, who want to have more understanding of the results that such tools can generate and what they mean in practice, for example how they relate to the real end user experience for people who have some disabilities. So I would say yes, training, but it should also target the various types of roles that can be relevant in this area.
>> JOSHUE O'CONNOR: Very good. It is a good point, which leads into a comment we got that was a previous question asking about the user experience, and the quality of the user experience. We know that standards conformance is one thing, and it is easy, or shall we say possible, to create things that are technically accessible but not particularly usable. Whereas we have to have that emphasis on good usability also in technical accessibility. And we know that this is being looked at in WCAG 3, factoring in these aspects.

A nice transition to the next presentation. Yes. Thank you very much, Fabio. And thank you very much, Seth. And to everyone who contributed to the session. It was very interesting.

And there will be further discussion at the end. If people have other questions or comments relating to the sessions, please put them into the chat, and we can wrap all of this up hopefully nicely at the end. So I guess yes, thank you both.

So we are now at the end of the first session, pretty much. For the next session coming up I'd like to introduce Jade from the Open University. In the next session we will be talking about education and accessibility, and we are looking forward to that. So Jade, are you set up to take it from here?
>> JADE MATOS CAREW: I think I'm set up.

>> JOSHUE O'CONNOR: Great.
>> JADE MATOS CAREW: Okay. Hi everyone. My name is Jade, and I'm based at the Open University in the UK. I look after the accessibility and usability evaluation team here at the OU. I'm also a participant in the Education and Outreach Working Group alongside Carlos and Leticia. And today I'd like to introduce our two speakers for the education session. Firstly, Armony Altinier, who has worked as a digital accessibility consultant and trainer in France since 2007. She founded Koena, a private research center to explore the implementation of digital accessibility in various organizational and intercultural contexts. She is joined by Estella Oncins, who holds a Ph.D. in accessibility and ambient intelligence from the University of Barcelona, where she works as a postdoc in the field of media and digital accessibility.

Today they are going to give you an overview of the IMPACT project in the EU, the aim of which was to define skills and competencies and develop a framework for professionals who are engaged in accessibility education. A real understanding of users' accessibility needs and rights is at the heart of this project and forms the basis of how the modular curricula were brought together and developed in the second stage of the project. I would like to invite them to give their presentation, and I will give a reminder when there are two minutes to go, if that's okay.
>> ARMONY ALTINIER: Hi everyone. So I'm going to start the presentation, then I will leave the floor to Estella in five minutes. We are going to talk about training for inclusion and digital accessibility: a social need and a human right, to us.

So the context of our project is a question: who is going to implement digital accessibility in Europe? Because we have a new framework that entered into force on September 23rd, 2020, so it is quite new. But who is going to implement it for real? The EU accessibility legislative framework is based, at the top, on the United Nations Convention on the Rights of Persons with Disabilities, adopted in 2006.

And we have three directives. In the European Union we had the Web Accessibility Directive, adopted in 2016. This one is fully implemented in Member States; it only entered into force on September 23rd, 2020.

We have two more: the Audiovisual Media Services Directive in 2018, and the European Accessibility Act, adopted in 2019, which hasn't been implemented in Member States yet.

And then we have a standard, the EU standard EN 301 549, which is a complicated name. But in reality, for the Web section, it's only WCAG, the Web Content Accessibility Guidelines of the W3C. It is really the same.

So we have all these new requirements. And to quote the European Disability Forum survey of 2019, 42% of users are not very satisfied with the Web Accessibility Directive transposition and implementation. The majority of new websites, 80%, are found not accessible.
So who is going to implement everything? We identified two types of skills. First, you have cross-disciplinary skills, which are skills needed by all digital professionals: product owners, UX designers, developers, editors. It is not a new job. But we also identified specific skills for specific job roles.

We already know some jobs that are not so new in some countries. We have digital accessibility managers, who are piloting the legislation implementation and how it works. They are managers; they don't have a lot of time; they have an overview, a supervisory role. And then we have WCAG or standards auditors, who are checking compliance with the standards. But the problem with this approach, we think, is that there is a missing link. And we wanted to work on that, because with this approach we see digital accessibility as a technical issue.

And to us it is not a technical issue at all. It's a political and social topic, and we need cultural change. So we are working on creating a new job, which is digital accessibility educator.
That's the aim of the IMPACT project. The acronym means Inclusive Method based on the Perception of Accessibility and Compliance Testing. It is an Erasmus partnership on higher education. We are five partners. The coordinator is Koena; it is my company. And we created this with the Universitat Autònoma de Barcelona.

We have four objectives for the IMPACT project: map the current situation of digital accessibility practices in terms of training and practice; define the skills framework for a new professional profile, digital accessibility educator/mediator; generate recommendations for digital accessibility; and develop open source training materials.

We have four stages in the project. First, we addressed the skills in a skill card. Then we designed a modular curriculum. Now we are at stage 3, creating open source materials. And stage 4 is creating the certification, a European certification. And I give the floor to Estella.
>> ESTELLA ONCINS: Thank you. Okay. So as a follow-up on what Armony has mentioned, in the first stage what we did was an open survey in different languages to understand what the current situation was. The proposed competencies were four. The first one was understanding digital accessibility. The second one, digital accessibility context and digital accessibility services. The third one, implementing digital accessibility. And finally, digital accessibility promotion.

As described in the graphic, all competencies were mostly regarded as very important. And that's why we proceeded, according to the guidelines of ECQA, which is our certification agency, to develop these competencies into four different units and define the content of the units.

So the skill units that were developed according to the competencies were: understanding digital and media accessibility, covering base concepts, target groups and needs, and the accessibility context. The second unit was digital accessibility context and digital accessibility services: accessibility services, accessibility tools.

The third, implementing digital accessibility: getting started with web accessibility, accessibility management. We also divided it into toolkits for adults and toolkits for kids, for children, because we believe that inclusion and accessibility are part of education, or should be part of education.

And finally, digital accessibility promotion, divided into accessibility needs and benefits, stakeholder involvement, and accessible communication, because nowadays easy-to-understand language is something that covers a lot of fields in accessibility.
>> JADE MATOS CAREW: Two minutes left.
>> ESTELLA ONCINS: Thank you. I am at the conclusions; I'm about to finish. As conclusions: according to the first survey that we ran, the issue is that there is an international standard, but people in Europe do not know the European standard, the EN 301 549, which applies to the European Union. So here there is a need for training, because people are not aware of this standard; it remains unknown. Digital accessibility training is needed, and in particular training on the European standard EN 301 549. And there is also a clear demand and need for training in accessibility in all countries.

It is true that, as this was an international survey, people from Canada and from the U.S. also participated, and their training or accessibility levels might be higher. But in all European countries there is a training need.

Also, the evaluation of the proposed competencies was done through an online survey, and we also ran five different focus groups in France, Spain and Ireland.

So this is it. Thank you very much. On the slide you have the email address, Twitter and website of the IMPACT project. And if you have any questions, we are open.
>> JADE MATOS CAREW: Thank you so much. Does anybody have any questions before I rudely interrupt and ask my own question? Anyone on the chat? I'd like to ask a question, if I can start.

So in my role at the Open University I look after a team, and between us we are responsible for furthering accessibility awareness across the whole University. And the members of my team all wear different hats throughout the day: WCAG auditor, product manager, trainer, blog writer. So all of these roles are really just kind of blended into one. Do you see any benefit to that? Or are you really trying to push for separation between these roles? Either of you, if you would like to answer.
>> ARMONY ALTINIER: Can I start? Yes. I think you and most of us here are rare birds. And it is really hard to promote cultural change. If we ask for people to have so many different skills, it is really hard, and it takes time. There is another job that I didn't describe, which is web accessibility expert. But it is not really a job; it's an expertise. And expertise means experience: we have been working for so many years. The problem is that now we need to accelerate the movement and include more people, more professionals, to become ambassadors. So a digital accessibility educator is like an ambassador, but not an expert.

And I think we really need to separate the skills. Of course, it is really precious to have everything combined in one person. But when you leave your University, who is going to take over your part? Are there many of you in your University? I'm sure there are not. Maybe you, maybe one or two colleagues, and that's all. If you leave, who is going to take your part? Nobody. It is too high a level. That's my opinion. I don't know, Estella, if you want to add anything.
>> ESTELLA ONCINS: I agree with what Armony just said. One of the outcomes of the survey is that most of the training is very technically oriented in terms of digital accessibility.

And in fact, a lot of developers do not even understand what it means to be a person with a disability. And this understanding level is something that Armony mentioned in terms of cross-disciplinarity. This is why it is a modular training. It is not a closed structure, because any person can take this content and embed it in their training courses, and see how it matches.

So this is one of the things that for us was most relevant: it is not only for one single role. There are a lot of standards, a lot of technical documents, but in the end we are all related to or work with a developer, a designer, and they do not understand the standards. And this is what we aim for: to make it easy to understand.
>> ARMONY ALTINIER: Make accessibility more accessible. That's the aim of this particular job role.
>> JADE MATOS CAREW: Absolutely. Just to go back to your point on job descriptions, my team is a relatively new team, and we found it really difficult to write job descriptions for what we wanted. It has been real trial and error, seeing how the roles and the team developed over the past couple of years.

There are a couple more questions in the chat. One from Sharon: is there an intention to collaborate with U.S. efforts such as Teach Access? And there is the UK version of teaching accessibility, I got that wrong, haven't I? They're building their programs in higher ed to integrate accessibility skills. So are there any plans to join up?
>> ARMONY ALTINIER: I think that was the point of this event today, to cross the research. And we are already open, and I think I can say that for the whole consortium and all the partners: we would love to collaborate with everyone. So yes, why not. We haven't had time yet, but we are really open to that.

>> JADE MATOS CAREW: Estella, do you want to add anything there?

>> ESTELLA ONCINS: No. We are open for collaboration. One of the aims of the project is resilience: the project goes beyond its scope. So it is a three-year project, but then collaboration continues. I agree with Armony.

>> ARMONY ALTINIER: Maybe we will have, we haven't discussed it yet, an IMPACT 2, another European project to go further. That's possible.
>> JADE MATOS CAREW: And I suppose that's quite a good point, isn't it? How the project can go beyond and how it can be disseminated amongst higher education providers. Any thoughts on the best way to do that, to expand the reach of your project and how you are communicating your results and your findings?

>> ARMONY ALTINIER: Do you want to start for once?

>> ESTELLA ONCINS: Well, actually, part of the project is also dissemination, and that's what we are doing here, looking for people. When you create a project, it is not only about providing results; it is also about looking for partners to establish more sustainable resources. And actually we also use a lot of resources from the W3C, from the WAI. So it is not that this is a standalone project. This is a project which also looks for further involvement, for further collaborations. I don't know, Armony, if you want to add anything.

>> ARMONY ALTINIER: Yes. Maybe what is original in this project is that Koena is a private company. We have support within France from the digital professionals' trade union. And we really wanted it to be something concrete in real life, when we work and when you want to have employees. And so we really look for a way to make it last and to be really operational.

So that's why we have a job role and a training, not only a training with skills. We can find a lot of skills material on the Internet, a lot of things, but we don't understand what it is for. And the purpose is to give direction: it is for this kind of job. You can recruit someone in your company, give them a job, and he or she will need these skills.
>> JADE MATOS CAREW: Thank you. We have one more question, I think from Carlos, about mobile accessibility. Obviously the EU demands this as part of the legislation. Will you consider that in the proposed roles? It is quite a challenging area, and my team certainly struggled with it last year and this year to meet the deadline that was imposed upon us. Do you have any comments about how you are going to incorporate that into the roles?

>> ARMONY ALTINIER: We talk about the EN standard, the EU standard, so it is about mobile, but it is about everything that is digital, not only web accessibility. And the digital accessibility educator won't be an expert or an auditor, so we don't need to give him or her deep technical skills. So yes, of course, we will definitely talk about mobile accessibility too.

>> JADE MATOS CAREW: Estella, any comments?

>> ESTELLA ONCINS: Yes, as Armony just said, it is not about a single role. It is about a person with a broader notion, without being specific on a particular topic. Most of the time what happens is that you don't have a contact point for accessibility, and this is something that's needed: somebody who has a broad vision of what happens and what needs to be taken into consideration, without necessarily being able to provide a specific solution, but detecting problems. Most of the time, detecting an accessibility problem is more relevant than solving the problem. I don't know if you agree, Armony.
>> ARMONY ALTINIER: Yes, I agree.
>> JADE MATOS CAREW: There were a couple of questions on the Forum; I think we have got time for maybe one more. How can we create an accessible design culture in a team where there is no diversity? So perhaps a little bit tangential. Do any of you have any comments on that? It is important to talk about team roles and furthering education around accessibility within teams and organizations.

>> ARMONY ALTINIER: I'm not sure I understand the question.

>> JADE MATOS CAREW: How can we create an accessible design culture in a team where there is no diversity?

>> ARMONY ALTINIER: Yeah. By training. And by including users, end users, in the process, and by working on the process, not only on the specific task of each person. And we need to change that. If everyone has the same culture and there is no diversity, they need to include diversity, and they need to understand that they need diversity. And that's one of the jobs of the digital accessibility educator, because it is a cultural change.
>> ESTELLA ONCINS: Actually, just to add something: our team in the project is diverse. We have people with different abilities, so we already included people with different abilities within the creation process. So this is something that is already embedded in the modular curriculum that we are designing.

>> ARMONY ALTINIER: Thank you for mentioning that. I forgot; that's natural to me. But of course, we have people with disabilities and people without disabilities in the consortium. We are diverse, with different cultures, different countries, different languages, different abilities. So yes, that's so important.
>> JADE MATOS CAREW: We are nearly out of time, and I had overlooked a question in the chat about another standard commissioned by the European Commission, EN 17161. Has this been looked at as part of the project? The comment here is that it is more like a management standard.

>> ARMONY ALTINIER: We didn't include it in the project yet, but we are going to check. I don't know if you had this in your scope, Estella.

>> ESTELLA ONCINS: Yes. Thanks, Mia, for asking that. I know that you are already very involved in all the design for all work. Actually, the standard in itself is not considered within the project, but all materials are created considering a design for all approach. As you mentioned, the difference is that the EN 301 549 is a very specific standard with very specific targets and notes; it is very linked to the WCAG guidelines, with success criteria, which is not the case for the EN design for all standard. Another issue is that the standard you mentioned is not free, which is something that the EDF is fighting for: affordable standards. But as for the design for all approach, that's for sure, it is already included.

>> ARMONY ALTINIER: We have a unit about management, so it could be a resource to reference, but it is not open source. So that's a problem. But yes.
>> JADE MATOS CAREW: I think we are going to have to stop, because we are due a bit of a break. So thank you both for your presentation and for answering the questions today. There are a couple more comments in the chat; feel free to continue the discussion on the Forum as well. So yeah, I think I might hand back to Carlos now.

>> CARLOS DUARTE: Thank you, Jade. And thank you, Josh, for those two sessions. Very interesting. We are now going to have just a short break, a 15-minute break or a little less. We will be back at 20 to the hour. See you in a while. (Break).

>> CARLOS DUARTE: All right. Welcome back, everyone. I hope you enjoyed your coffee. I think we are ready to start our third session. This one will be focused around AI. And I'm delighted to introduce Jason White from Educational Testing Service as our session moderator. Jason, the floor is yours.
>> JASON WHITE: Thank you, Carlos. So yes, I'm at Educational Testing Service, where I work on a combination of standards and scholarship related to difficult problems in accessibility, including emerging technologies, and making accessibility better, thereby improving the capacity of people with disabilities to take full advantage of educational opportunities. I'm also a co-facilitator, with Scott Hollier, of the Accessible Platform Architectures Working Group's Research Questions Task Force.

We have three presentations as part of this session, and they are all broadly connected with the theme of Artificial Intelligence and machine learning. Let's proceed with the presentations in succession, keeping each to ten minutes. Then we can broaden the discussion and consider them all in the context of recent developments in machine learning and neural networks and their role in accessibility.

So the first presentation is entitled Unreasonable Accommodation: Web Experiences as a Blind Person. There are several authors, and it is going to be presented by Amanda Lacy, who is a Ph.D. student in the Sketch Recognition Lab at Texas A&M University. She previously received a bachelor's in computer science from the University of Texas at Austin. Her research is focused on digital accessibility and remote learning.
So I'll hand it over for a ten minute presentation.

>> SETH POLSLEY: Amanda, are you there and able to share your screen?
>> AMANDA LACY: Yes, I am about to start screen sharing. Yes. I have been testing technology for a very long time, since I'm congenitally blind, but I'm very new to research. So I would like to show you a little bit of what I think I have learned so far. The problem, as I understand it, is that most of the web is still largely inaccessible. I know this from experience. And the technologies also move so fast that they become obsolete before they have a chance to be made accessible. There is just not enough time. So I'm going to show you what this means, sometimes with descriptions, sometimes with video, and give a sense of the amount of time that it wastes and the frustration that it causes. These are concrete examples that you might expect to encounter if you are a grad student, like registering for classes. I'm not going to show this one; I'm going to describe it, because everyone's registration site is a little bit different.

So I spent about two minutes and 40 seconds searching through a bunch of repetitious text, and then I had to navigate some more. Then I had to accept the terms of service again, and that took three minutes and 55 seconds. I got caught in a loop: I kept pressing continue and it would take me to the previous page. Then I needed to press the continue button after that, and I had to enter more information, and that was five minutes and 14 seconds. And I didn't get finished with that.

Or you might get invited to play a game of Kahoot. You want to tell the person who created it, because in the game settings the default is to keep the text of the questions and the answers hidden from the players; you have to enable it to be shown. They don't realize that, so they leave it that way. And I think that maybe there should be a standard that says: given two possible defaults, pick the one that is more accessible.
This one does have a video. I will play it. (Video).

>> This is Gather.Town using a screen reader, because this is for doing poster sessions and things like that virtually. I'm going to press join.

>> AMANDA LACY: So the goal of this is that I'm trying to join a poster session.

>> The first page says the, and that's clickable. Clickable accessories.

>> AMANDA LACY: I'm baffled by an Avatar creator.

>> Some of these aren't labeled. Clickable, clickable, clickable. I'm pressing the next step button. My name is, and entering my name.
>> AMANDA LACY: Most of this time I'm hitting the tab keys or arrow keys or enter and space.

>> I'm going to press join the gathering. Activate the button. For some reason, down at the bottom of the page, it tells me it can't access my camera. If I move up, and I said I have to press your name, right?

>> Yes. If you want to follow, you have to press my name.

>> If I want to follow you, I am going to hit enter on your name. So I have done that.

>> Now I can tab. I think it is up to, it was back. Send message.
>> Do you want me to send message?

>> It is in that modal.

>> I can't find this at all.

>> I'm not sure why it pops up where the

>> I keep pressing it.

>> I think you tab to get to send a message. Then you can scroll around inside that.

>> So pressing message. With the arrow keys, I have to follow with the arrows and I can't press the tab.
>> AMANDA LACY: So I have to press arrows or it skips over it.

>> Now it is working. That's a lot of work to be able to accomplish that, though.

>> Right. I did a lot of work to get through all the clutter, and at the same time I have to be escorted around, basically, because this doesn't give me any information about what it would do if I used the arrow keys to move around.
>> AMANDA LACY: This one is interesting. I don't have time to show the whole thing. The first thing that happened when I visited the Grace Hopper Celebration conference site was that the first button was screen reader mode. I don't know what this does. I use a screen reader. I don't know if pressing it helped me use the site more easily or not. The site was generally very cluttered and confusing; I have to skip over it. The interesting part was that the presenter photos seemed to have been labeled using AI.

>> Skip forward through some links using Control Option Command. I will jump forward by link. This is interesting. One long block, followed by a name and a position, all as one stream. That sounds like a very, very long run-on sentence that speaks all at once and doesn't have any pauses, because it is one string.

>> Keynote. Use for accessibility.

>> AMANDA LACY: Unfortunately all the images were labeled like that. Everyone had some colored shirt on or had a red chair or something. It was very distracting. Fortunately, there are ways we can use AI to empower users rather than just describing these things.
For example, Seeing AI from Microsoft allows me to do image processing, text recognition and scene recognition, several features. It is an app for the iPhone. Facebook and Apple both have features where they will try to describe what's in an image; Facebook will identify if your friends are in an image, for example. Screen recognition for iOS is really interesting. If I use screen recognition, it can use AI to look at the screen and, from its data, identify the controls that are on the screen, label them for me, and enable me to use an app I wouldn't have been able to use before.

PaperToHTML is a website that I go to that allows me to read my papers more easily, because the AI makes them more concise and easy to navigate through.

I strongly believe that we should be using AI to rescue users from clutter, rather than subjecting us to more clutter. You heard how cluttered a website already was before we inserted that stuff.

But my only solution in this case was to disable image descriptions altogether. I'll show you how I did that right now.

Mac has a built-in screen reader called VoiceOver. Every computer with macOS has this. I'm going into VoiceOver Utility, then Web, then Navigate by images, and I'm going to turn that to never. So just as part of the problem of clutter in websites involves AI, the solution also involves AI, as we see in these great tools.

But I don't think that any standards for web accessibility in AI exist yet. I think that the W3C should consider creating some. I would now like to open it up for feedback and questions. I'd love to have a discussion about this. Thanks for your attention.
>> JASON WHITE: We are going to have the questions and discussion, but those will be deferred until all three presentations have been given.

>> AMANDA LACY: Excellent. Thank you.

>> JASON WHITE: But they're coming up. So let's see, our next presentation. And that was interesting; thank you very much for a thoughtful presentation there.
And unfortunately we are pressed for time. So the second presentation is entitled Making Media Accessible to All, and it is going to be presented by Andy Quested, who has been a Rapporteur for the International Telecommunication Union Working Party 6C. He has chaired several Rapporteur groups, including the one that developed HDR TV, and has been co-chair of ITU's joint accessibility group. In 2015 he was appointed Chair of Working Party 6C and initiated new areas of study on advanced immersive sensory media systems and Artificial Intelligence in media systems, sorry, in content production. Currently Andy leads standards work in the ITU and in the EBU, that is to say the European Broadcasting Union. So I will hand it over for your presentation for ten minutes on this topic.

And then we will return. Thank you.
>> ANDY QUESTED: Thank you. And I hope you can hear me. Apologies for being late to this meeting and missing some of the first session; I was at the ITU meetings themselves. Every time I see someone like the last speaker, I am amazed. We used to look at the images just to make sure they look good, without further consideration. So how things are beginning to change is something I'm very passionate about, knowing people who struggle with all forms of accessibility to the web and, what I'm passionate about, to television. What I'm going to talk about today is slightly different from many people's topics, in that it really concerns the accessibility of media itself.

Media is actually television, cinema, but also Web pages. We all know that the majority of the traffic on the Internet is based around video and audio. And is that accessible? My battle always is: how do we make media accessible to all? So for description: I'm a white male, I'm in my 60s but wish I wasn't, wearing a T-shirt with a headset, and I have a background image of the ITU. My current slide shows the two standards, or two reports rather, leading to standards that we are working on for accessibility.

The first one is Artificial Intelligence systems for program production and exchange. And the second one is a collection of use scenarios and the current status of advancement of immersive sensory media. They are really interrelated; the reason they are separate is so they don't get too large. How could the current advances in technology, based on what people perceive as immersivity, be used for accessibility?
The first question I always ask is: okay, you want to put this amazing system in my living room with 22.2 speakers, 24 speakers. Can it help me if I have a hearing problem? Or if I need clear audio, clear dialogue? Or if I can't hear at all and I need assistance with haptic enhancement? Do I need speech to text, text to speech? There is a lot of work being done in AI on simple things like transcripts to put programs together. But actually, if we do this properly, those things are there for accessibility. AI-generated presenters can be signers. This is not replacing the signer; this is allowing a signer to be sitting anywhere, at home, to sign a program, as opposed to having to come into a signing studio, which immediately limits the amount of signing we can do.

Automatic signing translation: how many languages can we get signing in simultaneously? Automated caption translation and automatic language translation. These things are all emerging, sometimes for the very wrong reasons. So we sat down and had a look at various standards, ISO ones. The background to this is: what do we really mean when we are talking about accessibility? It is not just about what many people call a disability. It is about personalized media. So sensory is about see, hear, taste, smell. Physical is manipulation, touch, move. Cognitive is perceive, understand, language. It is not just about sight and sound. It is about the quality of experience of a program.

In the EBU and ITU we decided to split this down further into seeing, hearing, participating and understanding. Seeing is a range of options. Hearing is a range of options. We are asking TV viewers and many website users to interact with content: how do you participate? How do you press red on a remote control if you have a motor function issue, and it takes you too long or you need some other device to press red? Understanding is not just about learning disabilities and autism; it is about aging as well. Can a person of 80 or 90 understand a program going at a speed that's produced by a 20-year-old?
+ ++ This is the scale we came up with. So the slide I'm showing at the moment is four horizontal lines with hearing, seeing, participating and understanding. Each one, if you like, has a marker which you can slide up and down. Left to right. Because there + is no up and down in this. There is no good or bad. It is just options. +
+ ++ So for hearing, for example, I might have a home system with full cinematics. Or I might need dialogue enhancement or dialogue only or I want captions or I want a signer. That's all about hearing. Seeing, can I enhance the video. The color. There are + several realtime software devices now that can actually enhance color for colorblindness. Differentiate between red and green, for example. And also enhance the difference between edges to enable people to see more clearly. +
+ ++ Participating is about voice control, any device, hybrid devices, Braille devices, bespoke devices. People don't realize that many people actually have devices built for a specific need. But how does that speak to something that's giving data out saying how, say, + for example, a voice device would work? Will it work if it is not a standard voice device? And then understanding your content as made by the producer, simplified dialogue, stretch programs, et cetera. +
+ ++ All are possible with the technology we have got at the moment. And we start dividing media up in to layers. Again four bars showing essence. And what this means is what makes a narrative accessible. This starts with a script. It doesn't start when we + finish a program. The object layer is what objects do I need. Vision objects, audio objects, data objects? All of these make up a program. And an object options layer, what can I change here? How much can I manipulate these objects to actually my + needs? And finally the control layer is how can I change it. How can I manage this change. +
+ ++ And this is where the common use profile comes in. The slide at the moment shows a group of files in to a TV showing a signer. And then below it under title common user profile is an iPad without a signer but with captions. So I can have a my TV output, + my immersive output or my portable or mobile output. And the common user profile I might start with one device and move to another. +
+ ++ So I need to be able to port these options between the devices. So the common user profile that we are looking at in the ITU is a way of taking the data about accessibility and personalization and moving it in to the real world for people to use. +
+ ++ So to finish I'm going to show you a video. And it is basically audio described. I had to chop off the top which set the scene. The scene set, this is from a UK book festival where a girl who has very, very severely deficient vision is brought on stage + to be a volunteer to try a headset that allows her to read a book. I will speak at the end of it. +
+ ++ >> At the age of 16 Maisy was diagnosed with a brain tumor. Over the next few months her vision was reduced. Maisy had agreed to try on stage the latest version of these GiveVision goggles, which amplify the wearer's remaining vision and highlight outlines. +
+ +>> So Maisy, are they working?
+ +>> Yes, I can see your microphone and I can see that you are smiling. And I can see the buttons on your shirt.
+ ++ >> One of the things that you said you really miss is reading. Okay. So we have a copy of Harry Potter and the Philosopher's Stone here. Would you I don't know whether this is going to work. Do you think you would be able to read us the first couple of + sentences if I hold the microphone up? Can you see it? +
+ +>> Yes. I'm sorry. Okay. Mr. and Mrs. Dursley of No. 4 Privet drive were proud to say that they were perfectly normal. Thank you very much. (Applause.)
+ ++ >> ANDY QUESTED: So from that you can see or you can hear the technology is actually helping people. And it is a way to bring to life things that we take for granted. The other thing I'm worried about is how these standards work together. If we don't + actually have standards, all these things will stop working as you move from one device to another. So I'm very keen these standards are brought together internationally. The competition there is good. But the data and the description of what we need to do + is the most important thing. Thank you very much. +
+ ++ >> JASON WHITE: Thank you indeed. So we will proceed now to our third presentation which is Preliminary Insights from a Chatbot Accessibility Playbook and Wizard of Oz Study. This is to be presented by Jeff Stanley, who is a human centered engineer at + the MITRE Corporation where he leads research in human machine teaming and accessibility. +
+ ++ Before coming to MITRE, Jeff spent 15 years developing interactive applications for organizations such as the Smithsonian Institution, Rosetta Stone and Ubisoft. Jeff has degrees in computer science and anthropology and an undergraduate degree in linguistics. + Jeff, ten minutes. And an interesting presentation to come. +
+ +>> JEFF STANLEY: Can you hear me?
+ +>> I'm not seeing the screen yet. We can hear you. You started screen sharing but I'm getting
+ +>> JEFF STANLEY: I am screen sharing. Let me try again here.
+ +>> JUDY BREWER: Try again to select the document you want to present. And otherwise we can project it from another computer on our side.
+ +>> JEFF STANLEY: Okay. I apologize. I tried to screen share and it seems like my entire computer has just frozen. So
+ +>> CARLOS DUARTE: I can share it for you and you let me know when to change slides.
+ ++ >> JEFF STANLEY: All right. Okay. So unfortunately because my computer froze I don't have access to what I'm talking about. So I know we are already behind schedule, but I wonder if you could just wait a couple of minutes while I reboot my computer here. +
+ ++ >> JUDY BREWER: Sorry to jump in out loud. But may I suggest either that we if we have another presenter cued we go and give Jeff a moment or that we resend his presentation to his mobile. We have a few different options. Or just wait. Carlos, this is + up to you. +
+ +>> CARLOS DUARTE: We don't have another presentation. Can you access your presentation on your mobile, Jeff?
+ +>> JEFF STANLEY: I can access my presentation on my mobile but I was having trouble using view on the mobile.
+ +>> CARLOS DUARTE: Because I'm already sharing your presentation. If you are not seeing it, it is hard to coordinate.
+ +>> JEFF STANLEY: I will start to speak. And then as my computer reboots I will be able to speak more in a more sophisticated manner.
+ +>> CARLOS DUARTE: Okay. Thank you very much.
+ ++ >> JEFF STANLEY: So hello, everyone. We are happy to be able to present okay. Sorry I got cut off. Hello, everyone. We are happy to present this ongoing research that we have been doing at the MITRE Corporation that's about accessibility for chatbots. + We have a great team working on this. Several of them are on the line with us today, including my colead Ronna ten Brink. +
+ ++ Next slide, please. So this should be a slide about this should be a slide introducing the problem space. So who is MITRE? MITRE is a not for profit organization that works in the public interest. We operate federally funded research and development centers + for the U.S. Federal Government. And we collaborate with a variety of agencies on topics such as tax administration, transportation, and health among other things. +
+ ++ So as we know chatbots introduce novel features that are not addressed by current digital accessibility guidance. Because they present content in sequence to a user and expect inputs in sequence. So these are things like message notifications, how does + one go back through the conversation history. How does one navigate options that might be presented. And without established guidance related to these novel features the risk is that chatbots will produce inequitable access to services. We know that + Federal Government agencies and local agencies are making chatbots to deliver services. And we want to make sure that those are equitable. +
+ ++ Okay. So the next slide should be about a literature review. Chatbot accessibility guidance, is that right? Are we in sync so far? +
+ +>> CARLOS DUARTE: We are. You are doing great.
+ ++ >> JEFF STANLEY: Okay. So the first step in this research was we reviewed existing guidance and some of this was academic studies. And some more was like industry online articles and blogs. We did a comprehensive web search and we found just 17 sources. + And we extracted 157 unique recommendations from those sources. +
+ ++ We clustered those in to themes and published that research and presented it at a conference in February. And that's been published by Springer. It was published in September. And we have the reference for that in the back of this presentation. Or if + you request it from me I can send it to you. One thing we found is that there is a lack of actionable steps here. The sources that did provide actionable steps really kind of drew from what was within the Web Content Accessibility Guidelines and didn't + venture in to the new challenging spaces that chatbots present. +
+ ++ The academic studies did do empirical research with users to kind of clarify those spaces but don't provide actionable guidance. We saw a gap there. And we are addressing that gap by matching it together with W3C guidance, seeing which W3C guidance can + be adapted and where the mismatches are, where new research is needed. And we are creating a chatbot accessibility playbook which will be published and released in December. +
+ +And we are working with the U.S. General Services Administration, GSA, to make sure that that playbook is socialized, and is available to everyone so that they can take advantage of that guidance.
+ +We also performed a small user study to try to refine the guidance in the playbook and identify opportunities for future research.
+ +Next slide, please. So this slide should be technical approach. I just covered it. So next slide, please. And we will talk about the playbook.
+ ++ Okay. So this slide should be about the chatbot accessibility playbook. In our playbook we have five plays. The most important one arguably is the first play, which is selecting an accessible platform. Many chatbots draw on a third party platform. And that platform + restricts what can be customized and what is accessible about the chatbot. +
+ ++ So we heard from teams that they found out after developing a chatbot that something was inaccessible and not able to fix it because it was not within the platform's capabilities. And the third party organization did not have the resources to fix it. + The other plays are designing the content, designing the interface, making sure that the chatbot embeds and integrates properly with the rest of the website. And then underlying all of those that should be done through the entire process is testing + with real representative users. Including, of course, users with a variety of disabilities. +
+ ++ In our playbook, there are five plays. Each play has a number of recommendations underneath it. And each recommendation has a number of activities underneath. We have two graphics on this slide. The first graphic is just presenting the five plays. The + second graphic is showing how a particular play, the one about designing the chatbot's content has a number of recommendations under it but two are shown here. Making sure the chatbot identifies itself as a bot and not a human as well as making sure + the chatbot states its purpose and what it can do. And then each of those recommendations have a number of activities underneath it. +
+ ++ So some of the activities here are discussing whether the chatbot should have a profile picture associated with it and what that should look like. For instance, should it be a robot or a more human picture. And questions that the development team can + ask itself, questions that the development team can ask the user during user research. +
+ ++ Okay. On to the next slide, please. So we conducted this small user study. We had six participants who were blind or visually impaired and six participants who reported no disabilities. And we developed a prototype chatbot using a Wizard of Oz interface. + Meaning that one of us was behind the scene controlling what the chatbot responded and we can provide some canned responses. +
+ ++ The goals here were to see if the guidance we gave in the playbook was sufficient or lacking in some way. So was the experience between the two groups, the control group and the experimental group, similar or disparate? As well as to uncover gaps + that we may have missed so far. +
+ +And we had quantitative surveys that we asked the users as well as we coded in a qualitative way their responses and reactions while using the chatbot.
+ ++ Some of the things that we found here, there are lots of open questions. The biggest one probably is that current paradigms for using chatbots don't match what our blind and visually impaired users were expecting. So, for example, our chatbot would give + a response. And then that response might have some canned options under it. So I'm sorry, which slide are we on right now? +
+ +>> CARLOS DUARTE: I just moved us to the next slide, next steps for accessibility research.
+ ++ >> JEFF STANLEY: Okay. All right. Could you please go back to the previous slide? There is a graphic there that I forgot to talk about. So this is the slide on the user study overview. And here we have a graphic that is showing the user experience while + interacting with the chatbot. There is a chatbot on this tax administration website. And the chatbot is asking what is your filing status. And there are a number of options in that question, user can select single, married, filing jointly, et cetera. +
+ ++ And we found that the blind and visually impaired users expected that if they tabbed down after being asked a question like this, they would be presented with those options. But that's not how our chatbot was implemented. Focus + always stayed in the free text input box that is at the very bottom. +
+ +So if they were to tab down, they would actually leave the chatbot. This was kind of the opposite of what they wanted. And they discovered that they needed to tab up in order to get to those options.
+ ++ So the keyboard users were expecting to interact with the chatbot in a way that we had not implemented it to support. And in a way that current chatbots are not set up to support. And so a lot more research should be done here to find out what users with + disabilities are expecting when they interact with their chatbot. Whether that's keyboard navigation or some other device or assistive technology. +
+ ++ Some other open questions we found are whether the chatbot should be more human. More like a conversational partner or more like a tool or a transactional tool. And whether messages, if we had a lot of content to present, whether that should be presented + in one long message. Or several shorter messages in a row. +
+ ++ And we had users that expressed preferences for either option in those cases. And so these are open issues that deserve future research. In addition our small study did the same thing that other studies that we had found in our literature review did which + is the focus on users with visual impairments and that's just one kind of disability. So future research needs to focus on a wider variety of disabilities when it comes to chatbot interaction. +
+ ++ And I'll just wrap up here. And say that one of our reviewers, in reading our paper, mentioned that there is a task force that is working on some natural language interface accessibility user requirements and we would very much like to find out how + that's going and collaborate with them, because it sounds like that will be contributing to new standards in this area. +
+ ++ So in summary, I apologize for the technical issues. And not being able to have my notes in front of me. But I feel that I have given a pretty good summary of what we did. And thank you so much for operating the slides when I could not. So if you progress + to the slide that has my e mail address on it, we are looking for opportunities and collaborators to help us socialize and test this chatbot playbook once we release it and to figure out what the next steps are. So if you are interested in this + topic at all, please send an e mail and we will be happy to talk about it more. +
+ ++ >> JASON WHITE: Thank you. Let's do what we can with the discussion. I'm wondering whether one of my colleagues here could put the link to the first public working draft of the natural language interface accessibility user requirements in to the chat. + That's currently open for public review and comment as a first public working draft. And everyone attending this symposium is very welcome to contribute by commenting on that draft. +
+ ++ So let's see, Carlos what do we have by way of questions from the chat then? And perhaps what I will just say before we go to that, is that the theme of machine learning and Artificial Intelligence is common to all three of our topics in this session. + In the first instance, it concerns the difficulties of dealing with the complexity and the accessibility issues of using assistive technologies with web based content and applications and the ability of machine learning solutions to undertake + recognition tasks and to simplify those interactions. +
+ ++ Secondly, obviously in relation to media accessibility where caption generation and potentially description generation have all been mentioned. There is some interesting work under discussion within the research questions task force at the moment about + the mitigation of content that generates flashes or other effects that can induce issues in those who are susceptible. +
+ ++ In the third instance the whole field of natural language processing, including speech recognition and language processing is undergoing a revolution at the moment due to machine learning. And so I think it is a highly relevant theme for all three of + the presentations. And there are interesting questions about how we introduce the capabilities and the issues of machine learning based on Artificial Intelligence in to our work on web accessibility. +
+ +So I'll stop my commentary and hand it over to Carlos. And we will try to get through whatever questions we can in the remaining time.
+ +>> CARLOS DUARTE: We haven't had any questions yet, Jason. So I think you could pick up on those topics with our presenters, if you wish.
+ +In previous sessions this was the same pattern. Someone needs to get the ball rolling.
+ ++ >> JASON WHITE: Maybe I will introduce one of my own questions. Machine learning based recognition and natural language processing technology can be introduced at various stages. Recognition technologies could be used in media production or web content + development. For example, to perform recognition tasks or to assist in code generation, or they can be used directly by the end users. We are seeing increased integration of these technologies in to online meeting applications, for example. And in to + assistive technologies such as screen readers or in to web browsers. +
+ ++ And so I think there is a question here as to where and how the technology is introduced and on whom the risks and limitations fall. If the end user is the person who is deploying the recognition technology, then they are the one burdened + with having to deal with errors and limitations. Whereas the author has an opportunity to correct for those in the authoring process. +
+ ++ And it also obviously can be difficult to write content under the assistive technologies and different user agents with different machine learning capabilities built in to them. And so we don't really have any common expectations on the authoring side + if we were to start relying on the users having, for example, recognition tools or natural language processing tools at their disposal. I will open up it to the presenters to make a first run on those kinds of questions. And maybe that will stimulate + further issues from our audience. +
+ +>> ANDY QUESTED: It is Andy. Should I start off maybe thinking about the beginning of chain of making content?
+ +>> JASON WHITE: Sounds good to me.
+ ++ >> ANDY QUESTED: One of the things that we have been looking at and this is always contentious in what I call traditional media, traditional broadcasting is the use of automation and how that automation is sort of taking away the personal approach of + signing, captioning and audio description, for example. But what we've seen recently is a reverse of how AI is being used, machine learning is being used as a spell checker, for want of a better term. And just take the UK's example, where the public + service broadcasters have a 100% requirement now for captions. Most of this is done by respeaking. And those captioners are anywhere in the world working from any location they choose. And what we found is that adding AI to their output starts + to correct the bloopers and errors that you get out of the back end and improves the quality. +
+ ++ So you are right to say it is who is responsible and for what it is doing. Because if you use it in the wrong place it could be more disastrous in terms of AI is always right and therefore the user is always wrong. But where you have an opportunity to + use technology to effectively improve the output, then we should go for it. This is going to be applied to signing as well and audio description. How do you audio describe a sporting event which a third party, and you haven't got the ability to read + those in a way that you can maintain the commentary? How to intersperse the live commentary and still maintain a good experience for the user. Thank you. +
+ +>> JASON WHITE: Thank you. That use of AI for quality improvements sounds to me like a good idea. Do we have anyone in the queue?
+ +>> CARLOS DUARTE: No questions in the queue. There was one of the authors of the last presentation sharing the link to the paper that Jeff mentioned. But still no questions.
+ +>> JASON WHITE: Does anyone else among the presenters wish to respond to the issues that we're discussing here?
+ +>> SETH POLSLEY: I also had a question if there is an opportunity, or if no presenter is speaking.
+ +>> JASON WHITE: It seems open.
+ ++ >> SETH POLSLEY: All right. So I was going to comment that, Andy, I really appreciated that video on the GiveVision headset because that was a moving way of demonstrating how we can apply this type of technology to really improve people's lives. I was wanting + to ask about your perspectives and this is kind of a general question I think for any of the presenters about how do you imagine ways that we can more broadly integrate that type of technology, that type of assistive technology where it is specific + to that user and it has this ability to basically capitalize on their, you know, whatever modality they use. And amplify that specific to their case. That's a wearable. I am curious about your thoughts on this. +
+ ++ >> ANDY QUESTED: Yes. This is where this common user profile comes in to play. If you have a particular requirement and it doesn't mean disability. It could be any requirement. And I don't know if anybody knows about the group in the U.S. Can I Play That + which looks at accessibility for gaming. Very, very good group because they are influencing the designers as well as the console manufacturers to make the gaming more accessible. +
+ ++ What they have realized is the gamers are getting older. So a 60 year old gamer cannot compete with a 20 year old gamer because their fingers won't move as fast. You want to port your requirements between devices. If I'm watching a TV program on my big + screen TV at home and I want to carry on watching it on the way in to work, why do I have to set that device up again to have the accessibility options for that particular program? +
+ ++ If it is a music program it might be different from a drama. It might be different from a sports program. So to actually have a profile and click which is the program, I took that BBC clip, I took that clip from, there are a couple of others. One of them + is about use of haptic clothing. And it was tried with two girls that went deaf at the age of 6. Two very young 20 year olds but still love night clubbing. They were using haptic clothes that gave them the difference between vocals. A moving experience + for them. +
+ ++ But I suddenly thought okay, this designer clothes, anybody would want to wear them. It gave you an option for a totally silent disco. What happens if we don't do standards? What happens if you turn up in a night club in the queue and say we are sorry, + you are wearing an Armani shirt and we have a Versace. Enhanced dialogue and color, using all the technologies that the commercial organizations are using to sell completely immersive ideas to make completely personal ideas. Thanks. +
+ +>> JASON WHITE: Thank you. So I assume we are getting close to time now. So Carlos, anything in the queue at this point?
+ ++ >> CARLOS DUARTE: Not quite. Something just came from Miriam. The question is as a tester and developer, I'm wondering while platforms emerge and multiply and I can't be sure how or where applications will be used. How can I account in the future for + the accessibility of what I develop? And at the moment the number of browser, platform and screen reader combinations is very overwhelming. We will open that up to our presenters to comment. +
+ ++ >> ANDY QUESTED: I feel like I should say something, I feel the same. I'm doing beta testing for Mac OS. And the first thing I noticed two years ago was that there were suddenly four options for accessibility that appeared as opposed to the usual two. It was the + idea of seeing, hearing, participating and understanding. And you are right, we do need to get some standards around this. Or the proliferation will mean that you may have a favorite, and if that favorite goes out of business or decides to make a change it + is gone. +
+ ++ >> JASON WHITE: All right. So interesting questions there about the role of Artificial Intelligence in both the authoring process, and the usage of web based content and applications as well as questions about obviously privacy and where processing + should best take place. And the different roles that machine learning based Artificial Intelligence could play in the creation of content, the remediation of content and also quality assurance. So I think that's a very good series of questions to + influence the development of well, potentially future symposia or future discussions within the Research Questions Task Force. Any final comments or questions before we conclude the session? +
+ +>> CARLOS DUARTE: There is another comment, and I guess also to Andy I suppose, about profiles and porting information, isn't that what the GPII project started to do?
+ +>> ANDY QUESTED: Yep. It is. And we are looking at the standards that we would need to input to that so that content can be created for any platform. But I noted Ronna has her hand up. But it is about collaboration, I think, between the groups.
+ +>> CARLOS DUARTE: Andy said Ronna has her hand up. So I think we might close it on that.
+ ++ >> RONNA TEN BRINK: Hi. I'm one of Jeff's coauthors on the chatbot accessibility work. I wanted to speak to the question from the tester and developer about all the different kinds of assistive tech and accounting for them in the future. + I think that, well, that's a really big problem. There are also kind of new kinds of interactions and technologies that are being developed right now such as chatbots, not to, you know, toot our own horn, where we can develop some very core + accessibility paradigms around there. That can help support whatever kinds of assistive tech that get used in the end. +
+ ++ So I think as these new technologies and ways of interacting emerge it is important that we focus on how to build those core accessibility paradigms in terms of structure, and design and things like that that then will cascade down to an easier time actually + developing and testing for the literal assistive tech at the end of the interaction line. Over. Thank you. +
+ ++ >> JASON WHITE: Thank you. And I just note that in addition to work on natural language interface accessibility, the Research Questions Task Force has recently published work on augmented reality and virtual reality immersive environment requirements and work on realtime communication + and remote meeting accessibility. All of which speak to the theme of increasingly important approaches to human computer interaction and their accessibility. +
+ +So I imagine we have probably hit the time limit. How do we stand?
+ +>> CARLOS DUARTE: Yeah, we did. Questions started to appear and unfortunately we have hit the time limit as you said.
+ ++ And we need to move to the next session. So thank you so much Jason and all the presenters. And this just before the closing. So this final session, our goal here will be to try to frame these discussions from the perspective of the symposium objectives. + I will ask Judy Brewer to comment on the issues that have been presented here from the perspective of the W3C Web Accessibility Initiative. So go ahead, Judy. +
+ +>> JUDY BREWER: Thank you, Carlos. And just making sure that I'm on screen and that my audio is coming through clearly.
+ +>> CARLOS DUARTE: Loud and clear.
+ +>> JUDY BREWER: Okay. Great. Carlos, thanks so much for the opportunity to comment as we come to the close of this research symposium. And my thanks to the presenters and Moderators as well who have given us plenty of food for thought.
+ ++ As Carlos said at the beginning of the symposium, through this part of the WAI CooP we want to foster the research space. WAI CooP stands for WAI Communities of Practice. Our intent is to better understand what's needed + in support of uptake of accessibility standards including at the regional level as well as how to better leverage input from all parts of the world to improve the relevance and quality of future WAI work at W3C. +
+ ++ I want to speak now to the question of how this symposium relates to our work at the World Wide Web Consortium Web Accessibility Initiative. The work that WAI does is work that contributes to standards development for accessibility of web based technologies. + This symposium helps W3C scan the space of issues that we may need to consider for our work in the future. And it also brings attention to angles we need to consider for our current work. WAI work at W3C is chartered under the W3C process. It includes + Working Groups and task forces, and it is open and transparent and available for public review and comment, which we invite and welcome. +
+ +W3C community groups allow more informal exploration of opportunities for potential future innovations in web based technologies, including in support of greater accessibility in the future.
+ ++ W3C process relies on multi stakeholder participation from industry, from user groups including people in the disability community, from research, from education and government, all partners at the table together. The Web Accessibility Initiative itself + develops standards and guidelines, implementation materials in support of these standards and authoritative educational materials to support harmonized uptake of these standards. These resources are all available through our website, w3.org/wai. Again + that is w3.org/wai. +
+ ++ And I trust my colleagues will put that link in to the chat, though I'm hoping you all know it and frequently visit it. In any case, there you can find not only the Web Content Accessibility Guidelines but an ecosystem of authoritative and educational + resources. Our intention is to be cross disability. I wanted to particularly mention that, because many of the research presentations that you heard today were focusing particularly on issues related to blindness, but I want to assure you that in + our WAI work we are looking at a full range of disabilities that can affect accessibility of web based digital technologies. And we are also working on expanding and improving accessibility provisions with regard to cognitive, language and learning needs, + neurodiversity, deafness and hard of hearing, mental health accessibility user needs and low vision needs. We are always looking at these additional areas and have active work in all of those areas. +
+ ++ W3C including WAI is also working to increase multiple aspects of diversity not only on the disability dimension but also with regard to race, age, culture, gender, geographies, languages, through ongoing work, creating a more welcoming environment, and + outreach efforts to increase awareness of the opportunity to participate at the standards table and to join the discussion. +
+ ++ So our work at W3C goes through a consensus process with wide public review. We not only welcome but we count on broad participation and review of our work to ensure that our work represents a comprehensive range of needs and perspectives. In the research + questions task force, which as Josh O'Connor mentioned earlier today is part of the Accessible Platform Architectures Working Group, we have been developing a series of accessibility user requirements. These include XR accessibility user requirements, + XR standing for a combination of virtual reality and augmented reality, and realtime communication accessibility user requirements. Media accessibility user requirements, an older document that we might update. Media synchronization. How to + ensure that smart agents are cross disability accessible. And, timely for our current circumstances, the research questions task force has been compiling issues on accessibility of remote meetings. +
+ +We welcome your feedback on the public draft. Thank you for dropping several things in to the chat there. These kinds of accessibility user requirements documents are not normative. In other words, they are not standards.
+ ++ But they are developed in coordination with the relevant standards Working Groups of W3C. To help standards developers and those Working Groups be aware of user accessibility needs, and technical implementation support requirements, as they are composing + their standards. So that their standards can provide that foundation level support for accessibility. And especially in this space of technologies that are emerging on to the web. It is no longer just web or just web and mobile. It is also automotive, + gaming, many different areas of technology. +
+ +Over the years we've seen many accessibility requirements make the journey from awareness and understanding, to technical specification implementation and widespread deployment. We invite you to be part of that process.
+ ++ And here I want to just give a shout out to Amanda Lacy. Your presentation I felt was very effective in, you know, helping people understand this isn't all working well yet, particularly when you add Artificial Intelligence in to the mix. These are the + needs we need to address. And we welcome you to the table to first practically look at how we can perhaps as you were asking consider developing guidance for use of AI effectively for accessibility without getting in to the AI bias issues and so + forth. +
+ ++ I would like to thank the others on the planning team, especially Carlos and Leticia who have led this part of the WAI CooP, and the interpreters and captioners and everyone who has helped in the background with logistical support. And the participants + in the symposium for being here. Again you can find the how to get involved section at w3.org/wai. Please stay safe and well. I will turn this back to Carlos, but I hope to see all of you in our work going forward if you are interested. Thank you so + much. +
+ +>> Thank you.
+ ++ >> CARLOS DUARTE: Thank you so much, Judy. Just before moving to the closing session, and because one of the goals of the symposium was to identify topics for the next symposium, I would just like to highlight some of the issues that I think + are definitely worth considering for at least two more symposia from what we have heard today. +
+ ++ I would say on one hand, there are standards, definitely. I think the first two sessions show that. We have standards for the web. We have standards for PDFs. But I guess these standards are now being read and used by new people that aren't used + to working with standards. And what we're finding out I guess is that these standards do not work well for them. So either they don't understand the standards or they just don't follow them. So is this a problem with standards? Is this a problem with + the way we communicate standards to others? And how can standards organizations help with this? +
+ ++ Also definitely there are issues related to the challenges we face in developing standards for emerging technologies and I think this became quite apparent in the last session. So I would say we do have food for thought. And we do have topics to + explore definitely in the coming years. Thank you, Morton, for suggesting also topics for the future. If any of you want to add your suggestions, please do. +
+ ++ And moving to I would say the closing session, just to keep everything on schedule, I would like to echo Judy's statement and thank all the organizing team, thank you. A big thank you also to our three Moderators, Josh, Jade and Jason. A big thank + you to all the presenters. As Leticia has already entered here in the chat, but it is easy to miss, as part of one of the contributions of this project we have created an online forum where we hope different communities can come together + and discuss accessibility related topics, not just research topics. And you can find that at topics.web directive.eu. Thank you, Leticia, for pasting the link in the chat. +
+ +In that forum you will find the sessions of the symposium and the papers that have been presented. So you can discuss those. You can keep discussing those in there. There are already multiple topics there to follow up on the discussions.
+ ++ This is all from my side. I invite you all to attend next year's symposium. We will announce it as soon as we define which topic we will address. And I invite you all to follow up on the WAI CooP developments. You can find the link for the newsletter + at the website. If someone can also paste it here in the chat, it would be useful. And I don't know if any of the Moderators or the organizing team wants to add something. +
+ +>> JOSHUE O'CONNOR: Thanks for having us, Carlos. That was great. Thanks, everyone. Really enjoyed it.
+ +>> CARLOS DUARTE: Thank you so much, Josh. And I think we can close the symposium. Thank you, everyone, for attending. It was great to have you here. And I do hope that you've enjoyed the symposium and that it has been valuable for you. See you next + year. +
+ +>> Thank you.
+ + + + \ No newline at end of file diff --git a/pages/about/projects/wai-coop/symposium2-captions/accessible_communication.vtt b/pages/about/projects/wai-coop/symposium2-captions/accessible_communication.vtt new file mode 100644 index 00000000000..49fccb18ce8 --- /dev/null +++ b/pages/about/projects/wai-coop/symposium2-captions/accessible_communication.vtt @@ -0,0 +1,2859 @@ +WEBVTT +Kind: captions +Language: en + +00:00:00.000 --> 00:00:02.720 +Hello, everyone, and welcome back + +00:00:03.720 --> 00:00:05.880 +to the second panel. + +00:00:05.880 --> 00:00:08.800 +And I'm now joined by + +00:00:10.360 --> 00:00:14.080 +Chaohai Ding +from the University of Southampton. + +00:00:14.760 --> 00:00:17.840 +Lourdes Moreno +from the Universidade + +00:00:17.880 --> 00:00:20.880 +Carlos III de Madrid +in Spain. + +00:00:21.120 --> 00:00:24.120 +And Vikas Ashok from + +00:00:24.320 --> 00:00:26.960 +Old Dominion University in the US. + +00:00:27.760 --> 00:00:31.120 +So thank you all for your availability. + +00:00:31.120 --> 00:00:33.840 +It's great to have you here. + +00:00:33.840 --> 00:00:37.800 +And let's as I said before, + +00:00:37.800 --> 00:00:42.000 +let's bring back the topic +of natural language processing. We + +00:00:43.160 --> 00:00:44.640 +we addressed it + +00:00:44.640 --> 00:00:49.160 +yesterday, but now from the perspective of + +00:00:49.160 --> 00:00:54.240 +how can it be used to enhance +accessible communication on the Web. + +00:00:54.560 --> 00:00:57.680 +And so here and I guess + +00:00:58.160 --> 00:01:01.120 +once again, similar to what I've done + +00:01:01.800 --> 00:01:04.720 +an hour ago in the first panel, + +00:01:04.720 --> 00:01:07.920 +you've been working on different aspects +of this + +00:01:08.160 --> 00:01:11.400 +large domain of accessible communication. + +00:01:11.840 --> 00:01:13.440 +And you’ve + +00:01:13.720 --> 00:01:16.520 +pursued advances in machine + +00:01:16.520 --> 00:01:20.000 +translation, in sign language, + +00:01:20.400 --> 00:01:24.400 +AAC, and so from your perspective + +00:01:24.400 --> 00:01:27.120 +and from your focus on the work, + +00:01:27.640 --> 00:01:32.160 +what are the current challenges +that that you've been facing? + +00:01:32.160 --> 00:01:34.200 +And that's that are preventing + +00:01:35.920 --> 00:01:38.080 +the next breakthrough, I guess. + +00:01:38.080 --> 00:01:43.560 +And also I would like to ask you to, +uh, for your first intervention also to do + +00:01:44.080 --> 00:01:47.680 +a brief introduction to yourself +and to what you've been doing. + +00:01:48.240 --> 00:01:54.080 +Okay, So I can start with you +Chaohai. + +00:01:54.120 --> 00:01:55.920 +Thank you for having me today. + +00:01:55.920 --> 00:02:00.720 +My name is Chaohai Ding and I'm a senior research +fellow at the University of Southampton. + +00:02:01.160 --> 00:02:05.560 +And my research interest is +AI and inclusion + +00:02:06.200 --> 00:02:09.440 +which includes using data science and AI + +00:02:10.400 --> 00:02:12.000 +techniques to enhance + +00:02:12.000 --> 00:02:15.440 +accessible learning, traveling and + +00:02:16.520 --> 00:02:17.920 +communication. + +00:02:17.920 --> 00:02:20.600 +So, yes, we used... + +00:02:21.120 --> 00:02:22.400 +NLP has been widely + +00:02:22.400 --> 00:02:26.160 +used in our research to +support accessible communication. + +00:02:27.200 --> 00:02:31.480 +Currently, we are working on +several projects focused on AAC. 
+ +00:02:31.480 --> 00:02:33.520 +So for example, + +00:02:33.600 --> 00:02:36.280 +we applied the concept + +00:02:36.280 --> 00:02:39.960 +net of ... knowledge graph + +00:02:40.080 --> 00:02:42.600 +to interlinking AAC + +00:02:43.920 --> 00:02:46.000 +symbols from different symbol sets. + +00:02:47.200 --> 00:02:49.120 +This can be used + +00:02:49.120 --> 00:02:51.800 +for symbol to symbol translation. + +00:02:52.640 --> 00:02:57.840 +And we also developed an NLP model +to translate + +00:02:57.840 --> 00:03:02.760 +the AAC symbol sequence +into spoke text sequence. + +00:03:03.400 --> 00:03:07.200 +So so that's the two projects +we're working on currently + +00:03:07.920 --> 00:03:12.000 +and we also working on +accessible e-learning project + +00:03:12.360 --> 00:03:14.880 +that we applied a machine translation + +00:03:16.040 --> 00:03:18.480 +to provide transcripts + +00:03:18.480 --> 00:03:20.600 +from English to, + +00:03:20.600 --> 00:03:23.400 +other languages +for our international users. + +00:03:24.000 --> 00:03:27.680 +So that's another scenario +we are working with on a machine + +00:03:27.680 --> 00:03:30.800 +translation for accessible communication. + +00:03:30.800 --> 00:03:35.360 +So there are a few challenges we have +identified in our kind of research. + +00:03:36.240 --> 00:03:38.640 +The first one is always the data, + +00:03:38.640 --> 00:03:42.640 +data availability +and the data opti...bility + +00:03:43.160 --> 00:03:48.360 +So as we know, an NLP model is normally +trained on a large amount of data. So + +00:03:49.360 --> 00:03:52.200 +especially for AAC, + +00:03:53.720 --> 00:03:57.200 +we, we are... +one of the biggest challenges + +00:03:57.200 --> 00:04:00.480 +is that we are lack of a + +00:04:00.480 --> 00:04:03.360 +data like user + +00:04:04.240 --> 00:04:07.720 +user user data... AAC data, +and also + +00:04:08.360 --> 00:04:12.120 +how a user interact with the AAC. + +00:04:12.520 --> 00:04:14.600 +So so + +00:04:16.080 --> 00:04:17.600 +which... + +00:04:17.600 --> 00:04:20.400 +and also we have several different + +00:04:21.000 --> 00:04:25.800 +AAC symbol sets used +by the different individuals + +00:04:26.040 --> 00:04:30.600 +and which make it very difficult +to develop NLP models as well + +00:04:30.840 --> 00:04:33.760 +because the AAC symbols + +00:04:33.760 --> 00:04:38.040 +are separate for each symbol set +and that's the + +00:04:38.080 --> 00:04:40.720 +another challenge is the lack of data + +00:04:41.480 --> 00:04:45.080 +interoperability in AAC symbol sets. + +00:04:46.320 --> 00:04:51.000 +Yet the third challenge +we are identified is the inclusion + +00:04:51.240 --> 00:04:54.960 +because we are working on AAC symbol sets + +00:04:55.720 --> 00:04:59.000 +from Arabic, English and Chinese. + +00:04:59.280 --> 00:05:05.040 +So there are cultural and social +difference in AAC symbols, which is + +00:05:06.760 --> 00:05:08.240 +important to + +00:05:08.240 --> 00:05:13.880 +consider the needs of different end +user groups on the cultural and the social + +00:05:14.160 --> 00:05:18.240 +factors, +and to be involved in the development + +00:05:18.360 --> 00:05:20.960 +of the NLP models for AAC. + +00:05:22.440 --> 00:05:24.160 +The first one is + +00:05:24.400 --> 00:05:28.120 +data privacy and safety +and this has been + +00:05:29.760 --> 00:05:33.360 +identified in our web application for + +00:05:33.360 --> 00:05:38.760 +or from AAC symbols to spoken texts. 
+ +00:05:38.760 --> 00:05:42.600 +So how do we, +if we want to, a more accurate + +00:05:42.600 --> 00:05:44.960 +or more specific, + +00:05:45.960 --> 00:05:47.400 +personalized + +00:05:47.960 --> 00:05:48.840 +application? + +00:05:48.840 --> 00:05:51.000 +We need the user's information. + +00:05:51.000 --> 00:05:54.000 +So the challenge is how + +00:05:54.000 --> 00:05:57.160 +how can we store this + +00:05:57.520 --> 00:05:59.040 +personal information + +00:05:59.040 --> 00:06:03.000 +and how how prevent the data +misuse and the bridge + +00:06:03.400 --> 00:06:06.200 +and how to make the tradeoff + +00:06:06.200 --> 00:06:08.760 +between the user + +00:06:09.240 --> 00:06:12.000 +information and the the model performance. + +00:06:13.720 --> 00:06:17.040 +And the last one + +00:06:17.040 --> 00:06:20.360 +is always the accessible user interface. + +00:06:20.360 --> 00:06:22.480 +So in the how, + +00:06:22.480 --> 00:06:26.040 +how to makes this AI powered tool, +NLP powered + +00:06:26.280 --> 00:06:30.840 +tools accessible +for for end users and + +00:06:32.680 --> 00:06:34.040 +and also + +00:06:34.040 --> 00:06:36.760 +there are more generic issues in + +00:06:37.760 --> 00:06:42.400 +AI like accountability Explainability +so yes I think + +00:06:42.400 --> 00:06:46.200 +that's the last of the challenges +we have identified in our research. + +00:06:47.760 --> 00:06:48.480 +Thank you. + +00:06:48.720 --> 00:06:49.000 +Thank you, Chaohai. + +00:06:49.000 --> 00:06:54.920 +Is a great summary of definitely +some of the major challenges + +00:06:54.920 --> 00:06:59.000 +that well are spread +across the entire domain. + +00:06:59.400 --> 00:07:01.200 +Definitely. Thank you so much. + +00:07:01.200 --> 00:07:04.280 +Lourdes, do you want to go next? + +00:07:06.400 --> 00:07:11.240 +You’re muted. + +00:07:11.240 --> 00:07:12.240 +Thank you. + +00:07:12.840 --> 00:07:14.520 +Thanks for the invitation. + +00:07:14.520 --> 00:07:16.920 +Good afternoon everyone. I’m Lourdes + +00:07:16.920 --> 00:07:21.400 +Moreno. I work as an associate +professor in the computer + +00:07:21.400 --> 00:07:24.880 +science department +on the Universidad Carlos III + +00:07:24.960 --> 00:07:27.880 +de Madrid, Spain. + +00:07:27.880 --> 00:07:29.160 +I am an accessibility + +00:07:29.160 --> 00:07:33.160 +expert. I have been working +in the area of technology + +00:07:33.160 --> 00:07:38.080 +for disability for 20 years. And... + +00:07:38.600 --> 00:07:41.200 +I have previously worked on + +00:07:41.200 --> 00:07:44.120 +sensory disability but currently + +00:07:44.640 --> 00:07:47.080 +I work on cognitive accessibility. + +00:07:47.640 --> 00:07:50.560 +In my research work I combinate + +00:07:50.880 --> 00:07:53.920 +method from the human computer interaction + +00:07:54.320 --> 00:07:57.720 +and natural language processing areas + +00:07:58.280 --> 00:08:00.480 +to obtain accessible solutions + +00:08:00.960 --> 00:08:03.560 +from the point of view of reability + +00:08:03.560 --> 00:08:07.120 +and the stability of the language +in user interfaces. 
+ +00:08:08.080 --> 00:08:09.080 +And so is + +00:08:09.080 --> 00:08:15.320 +the question currently in natural +language research is being developed + +00:08:15.440 --> 00:08:18.360 +at our language model in recent years, + +00:08:18.680 --> 00:08:20.960 +there had been many advances + +00:08:22.080 --> 00:08:24.320 +due to the increasing resources + +00:08:24.720 --> 00:08:28.680 +such as large dataset and cloud platform + +00:08:29.240 --> 00:08:32.320 +that allow the training of large models. + +00:08:33.120 --> 00:08:36.480 +But the most crucial factor is the use + +00:08:36.480 --> 00:08:41.200 +of transforming technology +and the use of transfer learning. + +00:08:41.200 --> 00:08:47.640 +These are methods based on deep +learning to create language model + +00:08:48.160 --> 00:08:50.280 +base of the neural network. + +00:08:51.440 --> 00:08:53.400 +They are universal models, + +00:08:53.400 --> 00:08:57.160 +but then support is different +in natural processing + +00:08:57.160 --> 00:09:03.240 +language tasks, such as +question answering, translation, + +00:09:03.240 --> 00:09:06.920 +summarization, +speech recognition and more. + +00:09:07.840 --> 00:09:13.840 +The most extensively used models +are the GPT + +00:09:13.840 --> 00:09:17.280 +from OpenAI, +and Bard from Google. + +00:09:17.760 --> 00:09:22.680 +But new and bigger models +continually appear + +00:09:23.360 --> 00:09:25.720 +that outperform previous one + +00:09:25.720 --> 00:09:31.200 +because they are a performance continuous +to a scale + +00:09:31.400 --> 00:09:37.440 +as more parameters are added to their +models and more data are added. + +00:09:38.640 --> 00:09:42.920 +However, and despite these great advance + +00:09:43.440 --> 00:09:46.160 +there are issues + +00:09:46.160 --> 00:09:51.240 +in the accessibility scope +challenges to address. + +00:09:51.240 --> 00:09:55.280 +One of them is bias. + +00:09:55.280 --> 00:09:59.000 +Language models have +different types of bias + +00:09:59.440 --> 00:10:03.240 +such as gender, race and disability + +00:10:03.840 --> 00:10:07.680 +but gender and race + +00:10:07.760 --> 00:10:11.040 +biases are highly analyzed. + +00:10:11.640 --> 00:10:14.640 +However it is in the case + +00:10:14.720 --> 00:10:16.840 +with disability biases. + +00:10:17.960 --> 00:10:22.240 +It has been a relatively underexplored. + +00:10:23.040 --> 00:10:27.600 +There are studies relative +this matter for for example + +00:10:27.600 --> 00:10:32.880 +in in these work +in the in the sentiment and analysis text + +00:10:33.400 --> 00:10:37.200 +the terms relative to disability + +00:10:37.200 --> 00:10:40.320 +have a negative value + +00:10:40.320 --> 00:10:45.120 +or in another work using a +model to moderate conversation + +00:10:45.120 --> 00:10:48.840 +classified takes mentions of disability + +00:10:49.080 --> 00:10:52.360 +as more toxics. + +00:10:53.200 --> 00:10:57.600 +That is algorithms +are trained to be restful + +00:10:57.600 --> 00:11:00.040 +that can be offensive and cause + +00:11:00.040 --> 00:11:03.000 +disadvantage to individual +with disabilities. + +00:11:04.240 --> 00:11:07.320 +So, a investigation is necessary + +00:11:07.320 --> 00:11:13.000 +to study in depth models to reduce biases. + +00:11:13.000 --> 00:11:16.640 +We cannot only use these language model +and directly + +00:11:16.640 --> 00:11:19.520 +use the outcome. 
+ +00:11:20.240 --> 00:11:22.520 +Another problem with these model + +00:11:23.520 --> 00:11:26.880 +is that there aren’t too many dataset + +00:11:27.360 --> 00:11:30.000 +related to accessibility area. + +00:11:30.000 --> 00:11:34.000 +For instance, there a few label corpora + +00:11:34.400 --> 00:11:36.760 +to be used in training simplification, + +00:11:37.200 --> 00:11:41.280 +algorithms, +lexical or syntactic simplification + +00:11:41.880 --> 00:11:43.600 +in natural language processing. + +00:11:43.600 --> 00:11:50.720 +I work in cognitive accessibility +in a in Spanish to simplify text + +00:11:50.720 --> 00:11:57.960 +to plain language and easy +reading language. To carry out this case + +00:11:57.960 --> 00:12:02.680 +we had to create a corpus +with an expert in easy reading, + +00:12:03.000 --> 00:12:05.680 +with the participation of older people + +00:12:06.360 --> 00:12:10.680 +and with people with disability +intellectual disabilities + +00:12:11.480 --> 00:12:14.480 +because the current corpora +had been created + +00:12:14.480 --> 00:12:19.520 +with non expert in disability, +non expert in plain language + +00:12:19.960 --> 00:12:24.000 +and they haven't taken into account +the people with disability. + +00:12:25.560 --> 00:12:30.360 +Also an effort devoted to solving this + +00:12:30.360 --> 00:12:34.040 +scarcity of resources are required + +00:12:34.040 --> 00:12:37.200 +in language with low resources. + +00:12:37.680 --> 00:12:42.600 +English is the language with more developed +with many natural language processing. + +00:12:42.600 --> 00:12:46.200 +But others, such as Spanish, have hardly + +00:12:46.200 --> 00:12:49.280 +any resources. We need system + +00:12:49.400 --> 00:12:54.720 +trained for English language +to work for Spanish as well. + +00:12:54.720 --> 00:12:58.680 +And finally, +with the proliferation of GPT models + +00:12:58.680 --> 00:13:03.160 +with application +such as ChatGPT + +00:13:03.480 --> 00:13:06.320 +another problem to address + +00:13:06.360 --> 00:13:09.760 +is the regulation on ethical aspect of + +00:13:10.800 --> 00:13:14.920 +artificial intelligence. + +00:13:15.840 --> 00:13:17.240 +Okay, thank you so much. + +00:13:17.240 --> 00:13:22.280 +Lourdes, definitely +some very relevant challenges in there. + +00:13:23.600 --> 00:13:26.160 +Vikas, I’ll end this first round with you. + +00:13:27.440 --> 00:13:30.360 +Thank you Carlos. +I’m Vikas Ashok + +00:13:30.440 --> 00:13:34.600 +from Old Dominion University +in Virginia United States. + +00:13:35.160 --> 00:13:37.680 +So I have been working researching + +00:13:37.680 --> 00:13:41.480 +in the area of accessible +computing for like ten years now. + +00:13:42.000 --> 00:13:46.920 +And my specialty focus area +is of people with visual disabilities. + +00:13:47.320 --> 00:13:50.840 +So I have mostly concentrated +on their accessibility + +00:13:50.840 --> 00:13:54.000 +as well as usability needs +when it comes to interacting + +00:13:54.000 --> 00:13:56.280 +with computer applications. + +00:13:57.080 --> 00:14:01.560 +So with the topic at hand, +which is accessible communication. + +00:14:01.600 --> 00:14:04.560 +So one of the projects +that I'm currently looking at + +00:14:04.560 --> 00:14:10.080 +is understandably of social media content +for people + +00:14:10.600 --> 00:14:13.920 +who listen to content such as, +you know, people who are blind. 
+ +00:14:14.640 --> 00:14:18.000 +So listening, +you know, social media content, + +00:14:18.000 --> 00:14:21.640 +text is not the same as looking at it. + +00:14:22.120 --> 00:14:25.320 +So, even though the social media text + +00:14:25.320 --> 00:14:31.160 +is accessible, it's not necessarily +understandable because of presence + +00:14:31.160 --> 00:14:34.880 +of a lot of nonstandard language + +00:14:34.880 --> 00:14:37.880 +content in social media such as Twitter. + +00:14:37.920 --> 00:14:39.600 +Like people create their own words. + +00:14:39.600 --> 00:14:44.040 +They're very inventive there, +so they hardly follow any grammar. + +00:14:44.880 --> 00:14:48.920 +So text to speech systems +such as those used in screen + +00:14:48.920 --> 00:14:52.920 +readers cannot necessarily pronounce + +00:14:52.960 --> 00:14:56.480 +these out of +vocabulary words in the right way, + +00:14:56.880 --> 00:14:59.920 +because most of these words, +even though they're in text form, + +00:15:00.360 --> 00:15:05.200 +they're mostly intended for visual consumption, +such as some kind of exaggeration + +00:15:05.560 --> 00:15:10.680 +where the letters are duplicated +just for some kind of additional effect. + +00:15:11.000 --> 00:15:14.800 +Sometimes even emotions are attached +to the text itself without any, + +00:15:15.160 --> 00:15:17.480 +you know, emoticons or anything else. + +00:15:17.960 --> 00:15:22.640 +So and sometimes they try +to phonetically match it, + +00:15:22.640 --> 00:15:27.080 +use a different spelling for a word +just for fun purposes. + +00:15:27.240 --> 00:15:33.320 +So this kind of fun as communication +has increased tremendously on social media + +00:15:33.320 --> 00:15:39.000 +and people are depending on social media +to understand or get news, even, + +00:15:39.400 --> 00:15:42.760 +you know, some kind of disaster news +or something happens anywhere. + +00:15:42.760 --> 00:15:46.680 +Some even, they first flock to the +social media to get it. So + +00:15:47.680 --> 00:15:48.880 +people who listen + +00:15:48.880 --> 00:15:53.760 +to content also should be able to easily +understand, so I’m focusing on that area + +00:15:53.760 --> 00:15:57.480 +how to use NLP to make this possible. + +00:15:58.360 --> 00:16:02.360 +Because even though this is not exactly +a question + +00:16:02.440 --> 00:16:08.400 +of accessibility in a conventional sense, +but it's more like accessibility + +00:16:08.400 --> 00:16:12.960 +in terms of being able to understand +the already accessible content. + +00:16:12.960 --> 00:16:14.920 +So that's one of the things. + +00:16:14.920 --> 00:16:18.760 +The other thing that we're looking at, +which is related to this panel + +00:16:18.800 --> 00:16:22.600 +is the... related to the bias, disability + +00:16:22.600 --> 00:16:27.720 +bias of natural language models, +especially those large language models. + +00:16:28.480 --> 00:16:33.960 +So unfortunately, these models +are reflective of the data it's trained on + +00:16:34.600 --> 00:16:40.720 +because most of the data associates words +that are used to describe people + +00:16:40.720 --> 00:16:45.080 +with disabilities somehow end up having +negative connotation. + +00:16:45.360 --> 00:16:47.680 +So they're using negative context. 
+ +00:16:48.040 --> 00:16:51.760 +So it's nobody's telling these models +to learn it that way + +00:16:52.320 --> 00:16:56.640 +except that the documents +or the text corpus that these models + +00:16:56.640 --> 00:17:01.240 +are looking at inherently +put these words that are, + +00:17:01.680 --> 00:17:06.200 +you know, many times +not offensive into the negative category. + +00:17:07.440 --> 00:17:10.080 +So I'm looking at how we can counter + +00:17:10.080 --> 00:17:14.160 +this. One example is toxicity detection + +00:17:14.320 --> 00:17:19.280 +in discussion forums, online discussion +forums are very popular. + +00:17:19.280 --> 00:17:22.280 +People go there, sometimes anonymously, + +00:17:22.280 --> 00:17:24.720 +post content, interact with each other. + +00:17:25.320 --> 00:17:28.440 +And, you know, +some of the posts get flagged + +00:17:28.560 --> 00:17:32.360 +as, you know, toxic +or this get filtered out. + +00:17:32.680 --> 00:17:35.800 +So even if they're not toxic + +00:17:36.160 --> 00:17:41.520 +because of the use of certain words +to describe disabilities or something. + +00:17:41.520 --> 00:17:43.760 +So we want to avoid that. + +00:17:43.760 --> 00:17:47.920 +So how do we +how can we use NLP to not do that. + +00:17:48.520 --> 00:17:52.920 +So these two projects are pretty much +what's closely related to the panel. + +00:17:53.400 --> 00:17:55.160 +So stick to these. + +00:17:55.160 --> 00:17:58.840 +This session. + +00:17:58.840 --> 00:17:59.240 +Thank you, Vikas. + +00:18:00.920 --> 00:18:05.720 +I'll follow up with that +with what you've mentioned and Lourdes + +00:18:05.800 --> 00:18:08.960 +has also previously highlighted + +00:18:08.960 --> 00:18:14.320 +the disability bias +and... and I'm wondering + +00:18:14.320 --> 00:18:18.240 +if you have any ideas +and suggestions on how can + +00:18:19.560 --> 00:18:22.320 +NLP tools + +00:18:22.920 --> 00:18:24.960 +address such issues. + +00:18:24.960 --> 00:18:27.960 +I'm thinking, +for instance, text summarization tools, + +00:18:27.960 --> 00:18:30.680 +but also other also NLP tools. + +00:18:30.680 --> 00:18:33.840 +How can they help us +address issues of disability bias, + +00:18:33.840 --> 00:18:37.920 +but also how can they explore other aspects + +00:18:37.920 --> 00:18:41.280 +like accountability or personalization + +00:18:41.280 --> 00:18:44.320 +of in the case of + +00:18:44.320 --> 00:18:45.480 +text summaries? + +00:18:45.480 --> 00:18:49.200 +And how can I personalize a summary + +00:18:49.200 --> 00:18:53.080 +for specific audiences +for the needs of specific people? + +00:18:54.200 --> 00:18:56.640 +I'll start with you now Lourdes. + +00:18:56.640 --> 00:18:57.600 +OK. + +00:18:58.680 --> 00:19:00.080 +Text summarization is + +00:19:00.080 --> 00:19:02.680 +a natural language +processing task. + +00:19:02.680 --> 00:19:04.920 +Is a... is a great resource + +00:19:06.720 --> 00:19:08.360 +because improve cognitive + +00:19:08.360 --> 00:19:14.080 +accessibility in order to help people +people with disabilities to process alone + +00:19:14.080 --> 00:19:16.880 +and deduce text. + +00:19:17.400 --> 00:19:20.280 +Also, in the web +content accessibility guidelines + +00:19:20.760 --> 00:19:25.200 +following success criteria 3.1.5 +Reading Level + +00:19:25.200 --> 00:19:29.720 +the readable summary is a + +00:19:30.160 --> 00:19:33.000 +resource that is recommended. + +00:19:34.640 --> 00:19:37.920 +But these task has challenges. + +00:19:38.320 --> 00:19:42.680 +Such us, bias, disability biasis. 
+ +00:19:43.040 --> 00:19:46.000 +And whether the summaries generated + +00:19:46.000 --> 00:19:49.320 +are understandable +for people with disabilities, + +00:19:50.000 --> 00:19:54.520 +that is, really understandable +for people with disabilities; + +00:19:55.080 --> 00:20:00.560 +therefore some aspects must be taken +into account: it is necessary + +00:20:00.560 --> 00:20:06.320 +to approach this task +with summarization of the extractive type + +00:20:07.520 --> 00:20:12.240 +where the extracted sentences +can be modified with paraphrasing + +00:20:12.320 --> 00:20:17.400 +resources that help the understandability +and readability of the text. + +00:20:18.520 --> 00:20:22.400 +To summarize text, +different inputs are required: + +00:20:22.960 --> 00:20:28.440 +not only knowledge about the sequences +of words or other + +00:20:28.800 --> 00:20:32.600 +clues about sentences, + +00:20:33.000 --> 00:20:37.320 +but also knowledge about the target audience +is important. + +00:20:37.440 --> 00:20:40.480 +Different types of users + +00:20:40.480 --> 00:20:44.760 +require different types +or personalization of summaries. + +00:20:46.400 --> 00:20:50.520 +Also, + +00:20:50.760 --> 00:20:53.320 +I think that + +00:20:53.760 --> 00:20:56.280 +it would be recommendable to include + +00:20:56.480 --> 00:21:00.720 +readability metrics +in the summary generation process + +00:21:01.240 --> 00:21:06.560 +to ensure that the resulting +summary is minimally readable. + +00:21:08.400 --> 00:21:09.000 +For instance, + +00:21:09.000 --> 00:21:10.680 +if + +00:21:10.680 --> 00:21:15.880 +we are in the context of a system +that provides summaries of public + +00:21:15.960 --> 00:21:18.560 +administration information +for older people, + +00:21:19.000 --> 00:21:22.880 +it's necessary +to take into account that the summary + +00:21:23.160 --> 00:21:26.600 +must be in plain language, + +00:21:26.760 --> 00:21:30.520 +therefore, in addition +to extracting the relevant sentences + +00:21:30.520 --> 00:21:35.760 +and paraphrasing, it will be necessary +to include knowledge about the guidelines + +00:21:35.760 --> 00:21:39.720 +of plain language to make +the text easier to read. + +00:21:40.560 --> 00:21:45.480 +And finally, corpora used + +00:21:46.240 --> 00:21:50.280 +to train natural language +processing systems shall be tested + +00:21:50.280 --> 00:21:54.880 +with users in order to attain +useful solutions. + +00:21:55.520 --> 00:21:59.520 +Only then will it be possible +to obtain understandable summaries + +00:21:59.520 --> 00:22:03.440 +for the whole of society and the elderly. + +00:22:03.720 --> 00:22:07.920 +And with respect to accountability, + +00:22:09.120 --> 00:22:13.600 +as in every artificial intelligence +algorithm, + +00:22:13.680 --> 00:22:16.200 +these models must be explainable, + +00:22:18.000 --> 00:22:21.120 +so it is necessary to answer + +00:22:21.680 --> 00:22:25.160 +questions such as how the processing + +00:22:25.160 --> 00:22:29.440 +actually performs, +the limitations of the datasets + +00:22:29.880 --> 00:22:34.960 +used to train and test the algorithms, +and the outcomes of the model. + +00:22:36.040 --> 00:22:37.480 +Therefore, good + +00:22:37.480 --> 00:22:41.640 +data management +and machine learning model + +00:22:41.640 --> 00:22:42.640 +training practices + +00:22:42.640 --> 00:22:46.320 +shall be promoted +to ensure quality results. + +00:22:49.080 --> 00:22:51.000 +And nothing else. + +00:22:51.000 --> 00:22:52.880 +Thank you, Lourdes. + +00:22:53.800 --> 00:22:58.920 +Vikas, do you want to... 
even though we... +from what I understood, + +00:22:58.920 --> 00:23:03.480 +you don't work directly +with text summarization but still others + +00:23:03.480 --> 00:23:07.720 +aspects of disability +bias, accountability, + +00:23:07.720 --> 00:23:11.040 +personalization, impact what you're doing. + +00:23:12.080 --> 00:23:16.760 +Yeah, I mean I use a lot of text summarization +so I can add to it. + +00:23:16.760 --> 00:23:21.520 +So to add to what Lourdes said, + +00:23:22.440 --> 00:23:26.560 +simplification +is also as important as summarization + +00:23:26.920 --> 00:23:32.240 +because sometimes it's not just +summarizing or shortening the content + +00:23:32.400 --> 00:23:35.840 +to be consumed, but it's also making +it understandable, like I said. + +00:23:36.280 --> 00:23:40.040 +So that means that certain complex +sentence structures + +00:23:40.040 --> 00:23:41.680 +and some exotic words + +00:23:41.680 --> 00:23:44.880 +we need to replace them +with equal and easier + +00:23:44.880 --> 00:23:48.080 +to understand +more frequently used words. + +00:23:48.240 --> 00:23:54.080 +So there there is some work there that has +been gone into text simplification. + +00:23:54.120 --> 00:23:57.840 +We created some kind of summarization +in the special case. + +00:23:58.160 --> 00:24:01.640 +It's from the same language +to text from between the same language. + +00:24:01.840 --> 00:24:05.800 +So the input is a text +in the same language as the output text, + +00:24:06.000 --> 00:24:09.720 +except that the output text +is more readable, more understandable. + +00:24:10.320 --> 00:24:12.360 +So that is extremely important. + +00:24:12.360 --> 00:24:13.440 +The other thing is + +00:24:14.400 --> 00:24:15.240 +summarization. + +00:24:15.240 --> 00:24:19.240 +Most of them tend +to rely extractive summarization wherein + +00:24:19.560 --> 00:24:24.080 +they just pick certain sentences +from the original piece of text + +00:24:24.760 --> 00:24:26.840 +so that they don't have to worry about the + +00:24:27.560 --> 00:24:30.800 +grammatical correctness +and proper sentence structures + +00:24:31.320 --> 00:24:34.400 +so that because they rely on humans + +00:24:34.400 --> 00:24:37.320 +who have written the text +in order to generate the summaries. + +00:24:37.760 --> 00:24:40.080 +So I can speak about + +00:24:40.440 --> 00:24:44.840 +how summarization needs to be personalized +in certain way for certain groups, + +00:24:44.960 --> 00:24:47.560 +especially for people +with visual disabilities. + +00:24:47.920 --> 00:24:51.720 +So what I have noticed in +some of my studies is that + +00:24:52.640 --> 00:24:56.480 +even though they can hear it, +they don't necessarily understand it + +00:24:56.480 --> 00:24:59.840 +because the writing +is sort of visual in the sense + +00:24:59.840 --> 00:25:03.040 +it needs to need you +to be visually imaginative. + +00:25:03.600 --> 00:25:06.560 +So what is the alt... +the non-visual alternative + +00:25:07.240 --> 00:25:09.480 +for such kind of text? + +00:25:09.840 --> 00:25:15.080 +So how do you summarize the text that +includes a lot of visual elements to it. + +00:25:15.280 --> 00:25:18.080 +So how do you convert it into non + +00:25:18.840 --> 00:25:21.520 +equal non-visual explanations? + +00:25:21.640 --> 00:25:26.000 +This necessarily goes +beyond the extractive summarization. + +00:25:26.000 --> 00:25:27.760 +You cannot just pick and choose, + +00:25:27.760 --> 00:25:31.760 +so you need to replace the wordings +in the sentence. 
+ +00:25:31.760 --> 00:25:36.120 +By other wordings that they can understand. +And some of the text, + +00:25:36.120 --> 00:25:37.960 +you know, these days, especially + +00:25:37.960 --> 00:25:42.240 +the articles, news articles and all, +they don't come purely as text. + +00:25:42.240 --> 00:25:44.920 +They're sort of multi-modal in the sense + +00:25:45.240 --> 00:25:48.000 +there are pictures, there are GIFs, everything. + +00:25:48.000 --> 00:25:51.200 +And the text sort of refers +to these pictures, + +00:25:52.120 --> 00:25:56.840 +so this is another problem +because then it becomes highly visual. + +00:25:56.840 --> 00:26:00.240 +So you have to take +some of the visual elements + +00:26:00.240 --> 00:26:03.920 +of the picture, probably through computer +vision techniques or something, + +00:26:03.920 --> 00:26:08.760 +and then inject it into the text +in order to make it more self-sufficient + +00:26:08.760 --> 00:26:12.800 +and understandable +for people who cannot see the images. + +00:26:13.760 --> 00:26:17.240 +So that's my take on it. + +00:26:17.280 --> 00:26:23.240 +Yeah, that's a very good point +about the multimedia information + +00:26:23.240 --> 00:26:26.320 +and how do we summarize everything +into text. + +00:26:26.320 --> 00:26:28.920 +Yeah, that's a great point. + +00:26:28.920 --> 00:26:31.560 +Chaohai, your take on this? + +00:26:31.560 --> 00:26:33.240 +Oh yes. Yeah. + +00:26:33.240 --> 00:26:36.880 +We don't have much experience +in text summarization. + +00:26:36.880 --> 00:26:42.640 +Most of our research is focused +on the residual AAC and interlinking + +00:26:42.680 --> 00:26:46.600 +and AAC generation, +but we do have a project + +00:26:47.040 --> 00:26:49.200 +that involved some text summarization. + +00:26:50.040 --> 00:26:53.240 +We constructed a knowledge graph + +00:26:53.640 --> 00:26:56.520 +for an e-learning platform +and we needed to + +00:26:57.680 --> 00:26:58.880 +extract + +00:26:59.280 --> 00:27:04.520 +text summaries from lecture notes +to make them easier + +00:27:04.520 --> 00:27:07.680 +and accessible for students + +00:27:07.680 --> 00:27:11.160 +with disabilities. So, + +00:27:11.160 --> 00:27:15.440 +based on that project, + +00:27:15.440 --> 00:27:19.440 +what we learned is that text +summarization is a very difficult task + +00:27:20.880 --> 00:27:24.480 +in NLP because it is +highly dependent on the text, + +00:27:25.600 --> 00:27:27.240 +context, domain, + +00:27:27.240 --> 00:27:31.040 +target audience and even the goal of the summary. + +00:27:31.040 --> 00:27:36.680 +For example, in our scenario, +we want to have the summary of + +00:27:37.960 --> 00:27:39.880 +each lecture's notes, + +00:27:39.880 --> 00:27:43.480 +but we have very long transcripts +in that lecture. + +00:27:43.920 --> 00:27:46.280 +So we used a few + +00:27:47.480 --> 00:27:49.680 +text summarization models to generate + +00:27:52.360 --> 00:27:55.200 +the summaries, but the outcome is not good + +00:27:56.360 --> 00:27:56.920 +somehow. + +00:27:56.920 --> 00:27:59.120 +It is mainly, as + +00:28:00.280 --> 00:28:03.520 +Vikas just said, some of the text + +00:28:03.520 --> 00:28:07.880 +summarization just picks +some of the text and replaces + +00:28:07.880 --> 00:28:12.960 +some of the words, let's say, +or even some that don't make sense. + +00:28:13.360 --> 00:28:16.520 +So that's 
+ +00:28:16.520 --> 00:28:20.640 +one problem +we identified in text summarization. + +00:28:21.120 --> 00:28:24.000 +And we also have some + +00:28:26.000 --> 00:28:28.200 +methods + +00:28:28.920 --> 00:28:29.800 +to address that, + +00:28:31.440 --> 00:28:33.360 +because + +00:28:33.360 --> 00:28:36.400 +we need to personalize, +because the project is + +00:28:37.240 --> 00:28:41.280 +related to adaptive learning +for individual students. + +00:28:41.280 --> 00:28:44.080 +We need personalization +for each student. So + +00:28:45.200 --> 00:28:47.920 +personalization could be + +00:28:47.920 --> 00:28:51.760 +customized, adapted to user needs. + +00:28:52.240 --> 00:28:55.760 +But this actually + +00:28:55.760 --> 00:28:58.920 +can be improved using + +00:29:01.000 --> 00:29:04.000 +users’ personal preferences + +00:29:04.400 --> 00:29:06.720 +or feedback, and + +00:29:08.200 --> 00:29:11.040 +also allowing the user to set the + +00:29:12.600 --> 00:29:14.800 +summary goal and... + +00:29:14.800 --> 00:29:20.120 +And also, simplification +is very important because some students + +00:29:20.360 --> 00:29:25.320 +may have a cognitive disability +or other types of disability. + +00:29:25.320 --> 00:29:28.200 +They need to have simplified or + +00:29:28.760 --> 00:29:33.720 +plain language. Yeah. + +00:29:33.720 --> 00:29:35.760 +Yeah, I think that's mainly what we + +00:29:37.200 --> 00:29:40.840 +have for text summarization. + +00:29:40.840 --> 00:29:41.520 +Thank you, Chaohai. + +00:29:41.600 --> 00:29:43.920 +Thank you. + +00:29:43.920 --> 00:29:48.480 +Okay, so we started +with the challenges + +00:29:48.480 --> 00:29:53.240 +and now I would like to move on +to the future perspectives. + +00:29:53.240 --> 00:29:58.440 +What are the breakthroughs +that you see happening, + +00:29:58.960 --> 00:30:01.280 +promoted by the use of NLP, + +00:30:02.080 --> 00:30:04.320 +for accessible communication? + +00:30:04.680 --> 00:30:06.920 +And we'll start with you now, Vikas. + +00:30:10.960 --> 00:30:12.840 +So my + +00:30:12.840 --> 00:30:16.200 +perspective +is that there's plenty of NLP, + +00:30:16.200 --> 00:30:20.120 +you know, tools out there already +that haven’t been exploited + +00:30:20.120 --> 00:30:24.760 +to the fullest extent to address +accessibility and usability issues. + +00:30:25.280 --> 00:30:27.960 +The growth in NLP techniques + +00:30:27.960 --> 00:30:33.080 +and methods has been extremely steep +in recent years + +00:30:33.080 --> 00:30:37.080 +and the rest of us in different +fields are trying to catch up. + +00:30:37.560 --> 00:30:39.840 +I mean, still, +there is a lot to be explored + +00:30:40.400 --> 00:30:43.320 +as to how they can be used to address + +00:30:43.320 --> 00:30:45.880 +real world accessibility problems, + +00:30:46.360 --> 00:30:51.120 +and we are in the process of doing that, +I would say. So + +00:30:51.120 --> 00:30:53.320 +text summarization is one thing + +00:30:54.240 --> 00:30:57.000 +that we discussed already, +which can be exploited + +00:30:57.000 --> 00:31:01.840 +in a lot of scenarios +to improve the efficiency + +00:31:01.840 --> 00:31:05.880 +of computer interaction +for people with disabilities. + +00:31:06.240 --> 00:31:09.840 +But the main problem, as we discussed +not only in this panel + +00:31:09.840 --> 00:31:12.120 +but also on other panels, is the data. + +00:31:12.120 --> 00:31:16.560 +So for some languages +there is enough ... 
corpus + +00:31:16.560 --> 00:31:19.960 +where the translation is good, +because the translation + +00:31:19.960 --> 00:31:23.280 +essentially depends on +how much data you have trained on. + +00:31:23.640 --> 00:31:27.200 +But for some pair of +languages it may not be that + +00:31:28.200 --> 00:31:28.800 +easy. + +00:31:28.800 --> 00:31:31.680 +Or even if it +does something may not be that accurate. + +00:31:31.680 --> 00:31:33.800 +So that may be a problem. + +00:31:33.800 --> 00:31:36.920 +And then the biggest area where I see, + +00:31:37.480 --> 00:31:41.160 +which can be very useful for solving + +00:31:41.160 --> 00:31:46.560 +many accessibility problems +is the improvement in dialog systems. + +00:31:46.560 --> 00:31:49.560 +So natural language dialog is more like + +00:31:49.560 --> 00:31:52.000 +a really intuitive interface for many + +00:31:53.360 --> 00:31:56.840 +users, including many people +with disabilities. + +00:31:57.360 --> 00:32:00.240 +So those who have physical impairments +which + +00:32:01.280 --> 00:32:05.040 +prevent them from conveniently +using the keyboard or the mouse + +00:32:05.040 --> 00:32:08.080 +and those are blind +who have to use screen readers, + +00:32:08.080 --> 00:32:11.040 +which is time +consuming, is known to be time consuming. + +00:32:11.040 --> 00:32:15.080 +So dialog assistants are, I would say + +00:32:16.320 --> 00:32:17.720 +under-explored... +they're still exploring it. + +00:32:17.720 --> 00:32:21.840 +We see that commercialization is going on +like smartphones and all, + +00:32:22.080 --> 00:32:26.240 +but still it's at the level +of some high level interaction + +00:32:26.240 --> 00:32:30.840 +like setting alarms or turning on lights +and answering some questions. + +00:32:31.280 --> 00:32:34.760 +But what about using that to interact +with applications + +00:32:34.760 --> 00:32:36.760 +in the context of an application? + +00:32:37.360 --> 00:32:39.480 +So if I see a play, + +00:32:41.040 --> 00:32:41.760 +I had a user + +00:32:41.760 --> 00:32:44.760 +comment to this particular document. + +00:32:44.760 --> 00:32:49.280 +It's in word or Google Docs. So can an +assistant spoken + +00:32:49.280 --> 00:32:52.200 +dialog assistant +understand that an automated + +00:32:53.360 --> 00:32:55.440 +means so this kind of automation + +00:32:56.240 --> 00:33:01.240 +will sort of address, +I feel will address many of the issues + +00:33:01.240 --> 00:33:04.640 +that people face +interacting with digital content. + +00:33:04.640 --> 00:33:09.080 +So that's one of the things I would say +we can use NLP for. + +00:33:09.480 --> 00:33:15.120 +The other thing is the increased +availability of large language + +00:33:15.120 --> 00:33:20.280 +models, Pre-trained models +like one Lourdes mentioned, like GPT, + +00:33:20.840 --> 00:33:25.680 +which is essentially transformer decoder +or generator based model. + +00:33:25.680 --> 00:33:28.040 +Then there's also Bert, +which encoder based. + +00:33:28.440 --> 00:33:32.040 +So these help us, you know, + +00:33:32.120 --> 00:33:36.920 +in a way that we don't need large +amounts of data to solve problems + +00:33:36.920 --> 00:33:40.320 +because they're already pre-trained +on a large amount of data. + +00:33:40.800 --> 00:33:44.560 +So what we would need are kind of small + +00:33:44.760 --> 00:33:49.800 +data sets that are more fine tuned +towards the problem we are addressing. + +00:33:50.520 --> 00:33:53.960 +So the datasets, +they're accessibility datasets. 
+ +00:33:53.960 --> 00:33:57.600 +They're I think there needs to be +a little bit more investment + +00:33:58.480 --> 00:34:02.960 +doesn't have to be that big +because the large language models + +00:34:02.960 --> 00:34:06.640 +already take care of most of the language +complexity. + +00:34:06.760 --> 00:34:08.200 +It's more like fine tuning + +00:34:09.160 --> 00:34:10.560 +the problem at hand. + +00:34:10.560 --> 00:34:14.400 +So that's where I think +some effort should go. + +00:34:14.400 --> 00:34:18.800 +And once we do that, obviously +we can fine tune and solve the problems + +00:34:18.800 --> 00:34:23.640 +and then there is a tremendous enhancement +or advancement + +00:34:23.640 --> 00:34:27.920 +in transport learning techniques +which we can exploit that as well, + +00:34:28.440 --> 00:34:32.280 +in order to not do stuff from scratch, +instead borrow + +00:34:32.280 --> 00:34:35.640 +some things that are already there +for something different. + +00:34:36.320 --> 00:34:38.280 +I mean, similar problem. + +00:34:38.280 --> 00:34:43.880 +So, so there is a lot to be explored, +but we haven't done that yet. + +00:34:43.880 --> 00:34:45.920 +So there's plenty of opportunity + +00:34:45.920 --> 00:34:48.960 +for research +using NLP expertise for + +00:34:49.560 --> 00:34:53.400 +problems in accessible communication, +especially. + +00:34:53.400 --> 00:34:56.160 +Yes, definitely +some exciting avenues there. + +00:34:57.120 --> 00:35:00.680 +So, Chaohai, can we have your take on this? + +00:35:01.120 --> 00:35:04.080 +What are your breakthroughs? + +00:35:04.200 --> 00:35:04.960 +OK. + +00:35:04.960 --> 00:35:08.560 +Just listened them, +Vikas, I totally agree with him. + +00:35:08.880 --> 00:35:09.240 +He's + +00:35:11.160 --> 00:35:13.200 +all opinions and + +00:35:13.200 --> 00:35:15.320 +for... for my research + +00:35:15.720 --> 00:35:18.480 +because I've mainly worked on AAC, +so, currently, + +00:35:19.160 --> 00:35:22.160 +so I would take AAC for example. + +00:35:22.920 --> 00:35:26.120 +So the future perspective for AAC, + +00:35:27.040 --> 00:35:29.200 +NLP for AAC, I think the first of + +00:35:29.240 --> 00:35:32.920 +that will be the personalized adaptive + +00:35:34.160 --> 00:35:37.480 +communication for each individual, +because + +00:35:39.240 --> 00:35:42.520 +each individual +has their own communication, + +00:35:43.640 --> 00:35:45.760 +their own way to communicate +with each other + +00:35:46.120 --> 00:35:49.200 +and NLP techniques can be used to make + +00:35:50.320 --> 00:35:52.720 +this communication more accessible, + +00:35:52.920 --> 00:35:56.480 +more personalized +and adaptive based on their + +00:35:58.000 --> 00:36:01.200 +personal preferences of feedback. + +00:36:02.440 --> 00:36:06.520 +So this can can be used to + +00:36:06.840 --> 00:36:10.320 +for personalize the AAC symbols so + +00:36:11.320 --> 00:36:14.440 +currently, AAC users + +00:36:14.440 --> 00:36:19.080 +they just using some standard +AAC symbol set for + +00:36:19.160 --> 00:36:24.480 +their daily communications, +so how can we use NLP to + +00:36:25.680 --> 00:36:29.520 +and generic and the generic AI models + +00:36:30.240 --> 00:36:34.720 +to create a more customized +personalized AAC symbols + +00:36:36.080 --> 00:36:37.440 +that's which + +00:36:37.440 --> 00:36:40.800 +which you could be have ability +to adapt to the + +00:36:42.720 --> 00:36:45.240 +individual's +unique cultural and social needs. 
+ +00:36:45.560 --> 00:36:48.240 +I think that's one way to potentially + +00:36:49.280 --> 00:36:52.280 +contribute to AAC users. + +00:36:52.800 --> 00:36:56.160 +The second one will be accessible multi + +00:36:56.320 --> 00:36:59.440 +modal communication, + +00:37:00.120 --> 00:37:01.720 +because + +00:37:02.000 --> 00:37:03.920 +NLP techniques + +00:37:03.920 --> 00:37:07.560 +have the potential to enhance + +00:37:08.520 --> 00:37:11.600 +accessible communication by improving + +00:37:12.960 --> 00:37:16.320 +interoperability of training data + +00:37:17.000 --> 00:37:22.160 +between verbal language, +sign language and AAC. So data + +00:37:22.160 --> 00:37:26.280 +interoperability could provide +more high quality training data for + +00:37:28.120 --> 00:37:28.960 +languages with + +00:37:28.960 --> 00:37:31.040 +a lack of data, and + +00:37:32.200 --> 00:37:35.400 +additionally, + +00:37:35.400 --> 00:37:39.720 +it can provide the ability to translate between + +00:37:40.280 --> 00:37:43.200 +different communication modes + +00:37:43.800 --> 00:37:46.280 +and make it more accessible +and inclusive. + +00:37:47.480 --> 00:37:50.280 +So in AAC, we can have + +00:37:51.440 --> 00:37:55.080 +multiple AAC symbol sets that can be + +00:37:55.640 --> 00:37:58.200 +mapped and interlinked by NLP models, + +00:37:58.680 --> 00:38:03.200 +and this can contribute +to translation from AAC to AAC + +00:38:03.520 --> 00:38:07.800 +and AAC to text, AAC to some language +and vice versa. + +00:38:08.480 --> 00:38:13.880 +Yeah, that's the +second aspect I think about. + +00:38:13.880 --> 00:38:15.240 +And then the third one is + +00:38:17.240 --> 00:38:19.720 +AI-assisted +communication, which Vikas + +00:38:19.720 --> 00:38:23.760 +just talked about with ChatGPT. + +00:38:23.760 --> 00:38:26.760 +So with this, + +00:38:26.760 --> 00:38:30.080 +these large language models have been trained + +00:38:30.080 --> 00:38:32.840 +by these big companies and + +00:38:33.960 --> 00:38:35.680 +they have been widely +spreading on social media. + +00:38:35.680 --> 00:38:38.560 +So how, + +00:38:38.560 --> 00:38:40.680 +how to use these + +00:38:40.680 --> 00:38:42.920 +trained large + +00:38:43.400 --> 00:38:48.360 +language models, incorporate them +with other applications, and then use them + +00:38:48.360 --> 00:38:53.560 +for more accessible communication +to help people with disabilities? + +00:38:54.000 --> 00:38:56.800 +That's another + +00:38:57.000 --> 00:38:57.560 +future + +00:38:57.560 --> 00:38:59.200 +we are looking for. + +00:38:59.200 --> 00:39:01.320 +The last one +that I'm going to talk about + +00:39:01.560 --> 00:39:04.880 +is more regarding AAC, +because it's quite expensive. + +00:39:05.160 --> 00:39:07.960 +So affordability is very important + +00:39:08.640 --> 00:39:13.080 +and it can be achieved by NLP + +00:39:13.320 --> 00:39:18.120 +or AI. As I mentioned, +we are currently looking into + +00:39:18.640 --> 00:39:23.320 +how to turn images into symbols +and how to generate + +00:39:24.640 --> 00:39:28.200 +AAC symbols automatically by using + +00:39:29.520 --> 00:39:33.720 +generative image AI models +like Stable Diffusion. So + +00:39:34.840 --> 00:39:37.960 +that's +another future we are looking forward to: + +00:39:37.960 --> 00:39:41.280 +how to reduce the cost +for accessible communication. + +00:39:42.000 --> 00:39:44.280 +Yeah. Thank you. 
+ +00:39:44.280 --> 00:39:44.640 +Thank you, + +00:39:44.640 --> 00:39:47.520 +Chaohai. Definitely a relevant point. + +00:39:47.520 --> 00:39:52.040 +Reducing costs of getting data +and all of that. + +00:39:52.320 --> 00:39:54.120 +That's important everywhere. + +00:39:54.120 --> 00:39:57.120 +So, Lourdes, what are you looking for + +00:39:57.360 --> 00:40:01.840 +in the near future? + +00:40:01.840 --> 00:40:05.880 +And you are muted. + +00:40:05.880 --> 00:40:11.920 +So as we have mentioned before, +there are two trends that are the + +00:40:12.360 --> 00:40:16.680 +the appearance of new and better +language model than the previous one + +00:40:17.120 --> 00:40:19.680 +working in these these new models + +00:40:20.120 --> 00:40:25.080 +and to reduce the disability biases. + +00:40:25.080 --> 00:40:29.040 +Also I am going to list a specific +natural language processing + +00:40:29.040 --> 00:40:31.800 +task and data application + +00:40:32.080 --> 00:40:34.320 +that I will work in the coming year. + +00:40:35.480 --> 00:40:39.960 +And one of them is accessibility +to domain specific. + +00:40:39.960 --> 00:40:41.680 +task, such as, health + +00:40:42.760 --> 00:40:44.720 +the the + +00:40:44.720 --> 00:40:47.960 +health language is highly +demanded need + +00:40:48.960 --> 00:40:51.800 +but patients have problems understanding + +00:40:51.800 --> 00:40:57.240 +information about their health +condition, diagnosis, treatment + +00:40:57.520 --> 00:41:02.560 +and natural language processing +method could improve their understanding + +00:41:02.560 --> 00:41:05.440 +of health related documents. + +00:41:06.400 --> 00:41:10.800 +Similar sample appear in +legal and financial documents, + +00:41:10.800 --> 00:41:14.880 +the language of administration, +e-government... + +00:41:15.480 --> 00:41:18.960 +Current natural language +processing technology + +00:41:18.960 --> 00:41:27.240 +that simplifies and summarizes +this could help in the roadmap. + +00:41:27.240 --> 00:41:29.480 +And another + +00:41:31.000 --> 00:41:33.720 +line is speech to text + +00:41:35.040 --> 00:41:37.800 +speech to text will be a relevant area + +00:41:37.800 --> 00:41:40.800 +of research in the field +of virtual meetings + +00:41:41.720 --> 00:41:46.440 +in order to facilitate accessible, +accessible communication by generation + +00:41:46.440 --> 00:41:50.120 +of summaries of meeting +as well as minutes + +00:41:51.280 --> 00:41:53.640 +in plain language. + +00:41:53.960 --> 00:41:56.320 +Another topic is the + +00:41:57.240 --> 00:42:00.600 +integration of natural language +processing + +00:42:00.600 --> 00:42:03.960 +method into the design + +00:42:03.960 --> 00:42:07.320 +and development of +multimedia user interface + +00:42:08.400 --> 00:42:12.120 +is necessary to face accessible +accessible communication + +00:42:12.120 --> 00:42:17.000 +from a multi-disciplinary approach +between different areas + +00:42:17.000 --> 00:42:20.520 +such as, human computer interaction +software engineering + +00:42:20.840 --> 00:42:24.360 +and natural language processing. + +00:42:24.360 --> 00:42:29.280 +Finally, another +issue is advancing + +00:42:29.280 --> 00:42:33.400 +application in smart assistant +in natural language processing + +00:42:33.920 --> 00:42:37.440 +method to support people with disabilities +and the elderly. + +00:42:38.520 --> 00:42:41.480 +Assist them in their daily tasks, + +00:42:41.760 --> 00:42:46.560 +and promote active living. 
+ +00:42:46.560 --> 00:42:47.920 +Okay thank you so much, Lourdes, + +00:42:47.920 --> 00:42:52.120 +and everyone of you +for for those perspectives. + +00:42:52.520 --> 00:42:55.480 +I guess we still have 5 minutes + +00:42:55.480 --> 00:43:00.520 +more in this session, +so I will risk another question + +00:43:00.520 --> 00:43:05.040 +and I will ask you to +to try to to be brief on this one. + +00:43:05.040 --> 00:43:09.720 +But the the need for data + +00:43:09.720 --> 00:43:13.720 +was common across all your interventions. + +00:43:13.720 --> 00:43:17.480 +And if we go back to the previous +panel, also, + +00:43:17.480 --> 00:43:21.360 +it was brought up by every every panelist. + +00:43:21.360 --> 00:43:23.760 +So yeah, we need data. + +00:43:25.000 --> 00:43:26.880 +What are your thoughts and + +00:43:26.880 --> 00:43:29.600 +how can we make it easier + +00:43:30.120 --> 00:43:32.600 +and to collect more data + +00:43:32.880 --> 00:43:37.120 +for the specific aspect +of accessible communication? + +00:43:37.120 --> 00:43:38.960 +Because, we communicate a lot, right? + +00:43:38.960 --> 00:43:41.280 +Technology has + +00:43:41.680 --> 00:43:43.720 +allowed us and open up + +00:43:44.320 --> 00:43:46.760 +several channels +to where we can communicate + +00:43:46.960 --> 00:43:49.960 +even when we're not co-located. + +00:43:50.520 --> 00:43:52.600 +So yeah, every one of us is + +00:43:53.160 --> 00:43:56.200 +different points of the planet +and we are communicating right now. + +00:43:56.360 --> 00:44:01.440 +Technology improved those that +that possibility a lot. + +00:44:01.880 --> 00:44:04.680 +And however + +00:44:04.680 --> 00:44:07.320 +we always hear this, +we need more data. + +00:44:07.320 --> 00:44:08.520 +We can't get data. + +00:44:08.520 --> 00:44:11.760 +So how do you think we can get more data + +00:44:13.160 --> 00:44:15.240 +is and of course we need + +00:44:15.240 --> 00:44:17.760 +the data to train these models, but + +00:44:18.280 --> 00:44:21.760 +can't we also rely on these models +to generate data? + +00:44:22.960 --> 00:44:25.320 +So let me just + +00:44:25.320 --> 00:44:28.560 +drop this on you now and + +00:44:30.120 --> 00:44:31.320 +any of you want + +00:44:31.320 --> 00:44:37.120 +to go first? + +00:44:37.120 --> 00:44:40.160 +I can go first. Okay. Yeah. Yeah. + +00:44:40.160 --> 00:44:43.320 +Because we have actually working on open + +00:44:43.320 --> 00:44:46.240 +data four years ago before, I mean + +00:44:47.440 --> 00:44:50.680 +the AI and the data science +because when I started my PhD + +00:44:50.840 --> 00:44:55.720 +we working on the open data and +there is a open data initiative in UK. + +00:44:56.040 --> 00:44:59.720 +So we want to open our data +and government data + +00:45:01.120 --> 00:45:05.120 +and the, and the public transport data +and that's + +00:45:05.120 --> 00:45:09.200 +how long working on public transportation +with accessibility needs. + +00:45:09.200 --> 00:45:11.400 +So there's a lack of data. + +00:45:11.520 --> 00:45:13.240 +At the beginning of my Ph.D. + +00:45:13.240 --> 00:45:17.320 +so few years later, the still +lack of the accessibility information data + +00:45:18.000 --> 00:45:19.240 +on this data. + +00:45:19.240 --> 00:45:23.400 +So the I think the so how can we +how is this + +00:45:23.760 --> 00:45:28.920 +I mean, the accessibility area, +how can we have such a data + +00:45:29.320 --> 00:45:32.000 +to to have to train our model? 
+ +00:45:32.000 --> 00:45:36.320 +I mean, the first advice, +what I used to do + +00:45:36.320 --> 00:45:40.560 +with public transport data is I mapped all + +00:45:41.520 --> 00:45:42.760 +available data + +00:45:42.760 --> 00:45:46.400 +into a larger dataset. That's incurred + +00:45:46.480 --> 00:45:50.120 +a lot of labor work +like the cleaning data integration + +00:45:50.120 --> 00:45:54.560 +and all this +method to make data available. + +00:45:55.120 --> 00:45:57.560 +That's the first first approach. + +00:45:57.840 --> 00:46:00.960 +The second is we think about how can we + +00:46:02.200 --> 00:46:05.360 +contribute like + +00:46:05.360 --> 00:46:08.160 +a data repository or something + +00:46:08.160 --> 00:46:13.120 +like an image net or word net +that we can collaborate to together + +00:46:13.160 --> 00:46:15.520 +to contribute the identified + +00:46:16.520 --> 00:46:19.320 +data related to accessibility research. + +00:46:20.160 --> 00:46:25.680 +I think that that's we can as a community, +we can create such a universal + +00:46:26.000 --> 00:46:28.320 +repository or, or + +00:46:30.160 --> 00:46:32.200 +yeah, some kind of data initiative + +00:46:32.200 --> 00:46:35.480 +that we can working on +accessibility research. + +00:46:36.240 --> 00:46:38.920 +And the third approach +is that definitely + +00:46:39.000 --> 00:46:42.440 +that we can generate +the data based on the small data. + +00:46:42.440 --> 00:46:45.400 +We can be using + +00:46:45.840 --> 00:46:48.720 +generative AI model to generate more, + +00:46:49.840 --> 00:46:53.000 +but to +do the question is, is that data reliable? + +00:46:53.080 --> 00:46:58.240 +The data to generate generate enough, +or is that then the bias? + +00:46:58.640 --> 00:47:01.600 +So yeah, that's my my conclusion. + +00:47:01.960 --> 00:47:02.680 +Thank you. + +00:47:03.400 --> 00:47:04.440 +Yes, exactly. + +00:47:04.440 --> 00:47:07.440 +That's the... the big question mark. + +00:47:07.440 --> 00:47:08.200 +Right. + +00:47:08.360 --> 00:47:11.520 +Is that synthetic data +reliable or not, so + +00:47:12.000 --> 00:47:14.440 +Vikas or Lourdes +do you want to add something? + +00:47:15.480 --> 00:47:18.000 +So yeah I mean I have used synthetic data + +00:47:18.120 --> 00:47:21.000 +before based + +00:47:21.040 --> 00:47:24.240 +the little bit of real data +and in some cases + +00:47:24.240 --> 00:47:26.440 +you can generate synthetic data. + +00:47:26.680 --> 00:47:29.640 +So one of the things I had to do +was extract + +00:47:30.400 --> 00:47:32.440 +user comments in documents. + +00:47:33.280 --> 00:47:37.480 +Most of these word processing applications +allow you to post comments + +00:47:37.960 --> 00:47:42.800 +to the right for your collaborators to look at +and then, you know, address them. + +00:47:43.240 --> 00:47:47.360 +So automatically extracting +that I had to generate synthetic data + +00:47:47.360 --> 00:47:51.800 +because obviously you have few documents +with collaborative comments. + +00:47:52.320 --> 00:47:54.800 +So the appearance there is like, okay + +00:47:54.840 --> 00:47:58.360 +comments will appear somewhere +on the right side, right corner, + +00:47:59.080 --> 00:48:02.400 +which will have some text in it +with a few sentences. + +00:48:02.680 --> 00:48:04.680 +So there are some characteristics. + +00:48:04.680 --> 00:48:07.640 +So in those cases +we generated synthetic data. + +00:48:07.640 --> 00:48:09.960 +We train the machine learning model. 
+ +00:48:09.960 --> 00:48:13.720 +It was pretty accurate on this data, +which was like real data. + +00:48:14.520 --> 00:48:16.200 +So... exploit... + +00:48:16.200 --> 00:48:19.320 +In some cases you can exploit the + +00:48:19.680 --> 00:48:23.840 +way data will appear +and then generate the synthetic data. + +00:48:23.920 --> 00:48:27.240 +But in many cases it may not be possible. + +00:48:27.480 --> 00:48:30.480 +Like for the project +I mentioned, social media, + +00:48:30.480 --> 00:48:34.000 +where text contains +a lot of nonstandard words. + +00:48:34.600 --> 00:48:39.800 +Simply replacing the nonstandard words +with synonyms may not do the job + +00:48:39.880 --> 00:48:43.920 +because then you take the fun aspect +away from social media. + +00:48:44.280 --> 00:48:47.320 +Like, it should be as fun and entertaining + +00:48:47.320 --> 00:48:51.120 +when you listen to social media text +as it is when you look at it. + +00:48:51.640 --> 00:48:54.360 +So. So you have to do some kind of clever, + +00:48:55.360 --> 00:48:56.880 +you know, replacement. + +00:48:56.880 --> 00:49:01.040 +And for that you need some kind of expert +human expert going there and + +00:49:03.000 --> 00:49:04.200 +doing that. + +00:49:04.200 --> 00:49:07.920 +So crowdsourcing +I think is one way to get data + +00:49:07.920 --> 00:49:11.040 +quickly and it's pretty reliable. + +00:49:11.440 --> 00:49:14.200 +And see, I've seen in the NLP community + +00:49:14.240 --> 00:49:19.560 +like NLP papers that appear in ACL +and they rely heavily on the Amazon + +00:49:20.160 --> 00:49:23.280 +Mechanical Turk and other online + +00:49:26.160 --> 00:49:29.120 +incentivized data collection mechanisms. + +00:49:29.560 --> 00:49:31.720 +So that I think is one thing. + +00:49:31.720 --> 00:49:35.640 +The other thing I do +know, you know, in my classes + +00:49:35.640 --> 00:49:39.840 +especially, I get the students +to help each other out, collect the data + +00:49:40.160 --> 00:49:43.560 +so it doesn't have to be that intensive +every day. + +00:49:43.560 --> 00:49:47.400 +If they just even one student +collects like ten data points + +00:49:47.880 --> 00:49:52.560 +over the semester, it would be like +enough data for a lot of things. + +00:49:52.560 --> 00:49:56.960 +So you know, in each other projects +and in the end of the course + +00:49:57.120 --> 00:50:00.320 +pretty much they'll have a +lot of data for research. So + +00:50:01.320 --> 00:50:02.120 +you know, + +00:50:02.120 --> 00:50:06.040 +everybody can contribute +in a way and students + +00:50:06.040 --> 00:50:10.200 +especially are much more reliable +because they are familiar + +00:50:10.200 --> 00:50:14.840 +with the mechanisms +how to label collect data. + +00:50:14.840 --> 00:50:18.520 +And also they can understand +how things work as well. + +00:50:18.520 --> 00:50:22.760 +So, it’s like a win-win. + +00:50:22.760 --> 00:50:25.000 +Okay, yeah, thanks for that contribution. + +00:50:25.480 --> 00:50:26.880 +Good suggestion. + +00:50:26.880 --> 00:50:31.040 +And, Lourdes, we are really running out of time + +00:50:31.040 --> 00:50:34.120 +but if you still want to intervene, + +00:50:34.120 --> 00:50:36.200 +I can give you a couple of minutes. + +00:50:37.240 --> 00:50:37.760 +Okay. + +00:50:37.760 --> 00:50:40.720 +Only a I think that also + +00:50:40.720 --> 00:50:46.080 +we don't find many +we need a few data, but in my vision + +00:50:46.080 --> 00:50:49.240 +is also negative because obtaining + +00:50:49.240 --> 00:50:52.640 +the dataset is expensive. 
+ +00:50:52.680 --> 00:50:56.760 +An in accessible communication, +I work in simplification. + +00:50:57.960 --> 00:50:59.880 +these data must be prepared + +00:50:59.880 --> 00:51:01.960 +by the expert in accessibility + +00:51:03.360 --> 00:51:05.720 +is important as these data + +00:51:07.240 --> 00:51:11.400 +is validated +by people with disability + +00:51:11.680 --> 00:51:16.160 +and use +plain language resources + +00:51:16.880 --> 00:51:20.040 +and then it is a problem + +00:51:20.520 --> 00:51:25.640 +to obtain data with quality. + +00:51:26.880 --> 00:51:28.840 +Okay, thank you so much, + +00:51:28.840 --> 00:51:29.320 +Lourdes. + +00:51:29.320 --> 00:51:34.800 +And thanks a very big +thank you to the three of you, Chaohai, + +00:51:34.800 --> 00:51:36.680 +Vikas and Lourdes. It + +00:51:36.760 --> 00:51:38.760 +was a really interesting panel + +00:51:39.480 --> 00:51:42.040 +thank you so much for +for your availability and + diff --git a/pages/about/projects/wai-coop/symposium2-captions/computer_vision.vtt b/pages/about/projects/wai-coop/symposium2-captions/computer_vision.vtt new file mode 100644 index 00000000000..178bcf40724 --- /dev/null +++ b/pages/about/projects/wai-coop/symposium2-captions/computer_vision.vtt @@ -0,0 +1,3133 @@ +WEBVTT +Kind: captions +Language: en + +00:00:02.040 --> 00:00:04.480 +The topic for for this panel + +00:00:05.680 --> 00:00:08.800 +will be computer vision +for media accessibility. + +00:00:09.040 --> 00:00:11.920 +So here we aim to foster + +00:00:11.920 --> 00:00:15.880 +a discussion on the current state +of computer vision techniques + +00:00:16.000 --> 00:00:20.200 +and focus on image recognition +and identification + +00:00:20.200 --> 00:00:24.840 +and recognition of elements +and text in web images and media. + +00:00:25.480 --> 00:00:28.560 +And considering all the different usage + +00:00:28.800 --> 00:00:31.960 +scenarios that + +00:00:31.960 --> 00:00:34.160 +that that emerge on the web. + +00:00:35.080 --> 00:00:38.760 +And so we'll be looking here at aspects + +00:00:38.960 --> 00:00:41.560 +like how can we improve quality and + +00:00:42.920 --> 00:00:45.000 +and how do we define quality for this + +00:00:45.840 --> 00:00:49.600 +quality and accuracy of current computer +vision techniques + +00:00:49.960 --> 00:00:55.360 +and what are the opportunities and +what are the future directions for this + +00:00:56.560 --> 00:00:59.280 +in the in this domain? So + +00:00:59.280 --> 00:01:01.240 +we'll be joined + +00:01:01.240 --> 00:01:04.920 +by three panelists for this first panel + +00:01:05.280 --> 00:01:09.160 +Amy Pavel +from the University of Texas + +00:01:09.640 --> 00:01:12.600 +and Shivam Singh from mavQ + +00:01:13.200 --> 00:01:19.400 +and Michael Cooper from the W3C. + +00:01:19.400 --> 00:01:21.800 +And Okay, great. + +00:01:23.320 --> 00:01:27.080 +Everyone's online +and sharing their videos. + +00:01:27.080 --> 00:01:29.600 +So thank you all for agreeing to join. + +00:01:30.080 --> 00:01:33.880 +I will ask you to before +your first intervention. + +00:01:33.880 --> 00:01:37.720 +Just give a brief introduction +to yourself. + +00:01:37.720 --> 00:01:41.760 +So let people know who you are +and what you're doing. + +00:01:42.400 --> 00:01:45.960 +And I would like to start on + +00:01:47.000 --> 00:01:49.280 +one of the issues of quality. + +00:01:49.280 --> 00:01:53.600 +And as I was saying, +so how do we define quality + +00:01:54.280 --> 00:01:56.360 +here? 
And here + +00:01:57.520 --> 00:02:01.960 +I was looking at aspects such as how do we + +00:02:02.560 --> 00:02:06.800 +or how can we train AI models + +00:02:07.920 --> 00:02:11.520 +that are able to identify +aspects in an image + +00:02:12.120 --> 00:02:15.800 +such as identity, emotion and appearance, + +00:02:15.800 --> 00:02:20.680 +which are particular relevant +for personal images. + +00:02:20.920 --> 00:02:22.600 +So how can we + +00:02:23.760 --> 00:02:27.800 +get API to do that that we humans can do? + +00:02:27.800 --> 00:02:30.360 +So and I'll start with you, Amy. + +00:02:32.320 --> 00:02:33.200 +Excellent. + +00:02:33.200 --> 00:02:36.640 +Thank you so much. +So my name is Amy Pavel. + +00:02:36.640 --> 00:02:40.040 +I am an assistant professor at UT Austin + +00:02:40.280 --> 00:02:42.920 +in the computer science department, +and I'm super excited to be here + +00:02:43.440 --> 00:02:46.520 +because a big part of my research +is exploring how to + +00:02:47.040 --> 00:02:49.920 +create better descriptions +for online media. + +00:02:49.920 --> 00:02:54.640 +And so I work everywhere +from social media, like describing + +00:02:54.640 --> 00:02:59.760 +images on Twitter and as well as new +forms of online media like GIFs and Means. + +00:03:00.280 --> 00:03:02.080 +And I've also worked +a little bit on videos. + +00:03:02.080 --> 00:03:07.680 +So both educational videos +like making the descriptions for lectures + +00:03:08.040 --> 00:03:10.640 +as well as entertainment videos. + +00:03:10.640 --> 00:03:14.920 +So improving the accessibility of user +generated like YouTube videos, + +00:03:14.920 --> 00:03:16.000 +for instance. + +00:03:16.080 --> 00:03:19.280 +So I think this question you bring up +is really important, + +00:03:19.480 --> 00:03:21.600 +and I typically think about it +in two ways. + +00:03:21.600 --> 00:03:25.120 +So I think about what +what does our computer understand + +00:03:25.120 --> 00:03:28.560 +about an image and then how do we express + +00:03:29.200 --> 00:03:33.880 +what the computer understands +about an image and or other form of media? + +00:03:34.400 --> 00:03:37.480 +And so I think that we're +getting better and better at + +00:03:38.760 --> 00:03:39.400 +computers + +00:03:39.400 --> 00:03:42.400 +that can +understand more of the underlying image. + +00:03:42.600 --> 00:03:46.400 +For instance, we've gotten, +if we think about something like emotion, + +00:03:46.960 --> 00:03:50.880 +we've gotten a lot better +at determining exact landmarks on the face + +00:03:51.040 --> 00:03:53.440 +and how they how they move, for instance, + +00:03:54.040 --> 00:03:57.400 +or we might be able to describe +something specific about a person + +00:03:57.960 --> 00:04:00.520 +So if you look at me in this image, + +00:04:01.000 --> 00:04:07.000 +I have brown hair tied back into a bun +and a black turtleneck on and + +00:04:07.000 --> 00:04:12.720 +this is the type of thing we might be able +to understand using automated systems. + +00:04:13.200 --> 00:04:16.800 +However, the second question is kind of +how do we describe what we know + +00:04:17.320 --> 00:04:18.160 +about an image? + +00:04:18.160 --> 00:04:22.680 +And if I give you all of the information +about my facial landmarks + +00:04:22.680 --> 00:04:27.160 +and what I'm wearing for every context, +that might not be super useful. 
+ +00:04:27.160 --> 00:04:31.520 +And so a lot of what I think about +is sort of how we can best describe + +00:04:32.320 --> 00:04:35.760 +and like what people might want to know +about an image + +00:04:35.760 --> 00:04:39.640 +given its context +and and the background of the user. + +00:04:40.800 --> 00:04:44.680 +So just briefly on that point, +I usually think about + +00:04:44.960 --> 00:04:48.640 +who is viewing this image +and what might they want to get out of it + +00:04:48.840 --> 00:04:52.560 +and also who's creating it +and what did they intend to communicate. + +00:04:53.440 --> 00:04:57.160 +So there's these two questions, I think +give us interesting ideas about what + +00:04:57.160 --> 00:05:02.200 +data we could use to train to create +better descriptions based on the context. + +00:05:03.200 --> 00:05:07.760 +So example, for example, +we might use descriptions + +00:05:07.760 --> 00:05:11.720 +that are actually given by people +to describe their their own images + +00:05:11.720 --> 00:05:16.880 +or their identities or aspects +that they've shown in videos in the past. + +00:05:16.880 --> 00:05:19.240 +On the other hand, we might improve, + +00:05:21.040 --> 00:05:22.960 +so we might use +a bunch of different methods + +00:05:22.960 --> 00:05:28.720 +and improve our ability to select a method +based on the context of the image. + +00:05:28.720 --> 00:05:32.200 +So for instance, when I worked on Twitter +images, we would run things + +00:05:32.200 --> 00:05:37.040 +like captioning to describe the image, +like an image of a a note. + +00:05:37.200 --> 00:05:40.840 +It might just say note, +but we also ran like OCR to automatically + +00:05:40.840 --> 00:05:44.600 +extract the text and tried +to pick the best strategy to give people. + +00:05:45.200 --> 00:05:49.360 +You know what we thought might be the best +amount of information given the image. + +00:05:49.560 --> 00:05:50.800 +So that's my initial. + +00:05:50.800 --> 00:05:54.640 +I'm sure more aspects of this will come up +as as we have a conversation, + +00:05:54.640 --> 00:05:57.480 +but I just wanted to give that +as my first part of my answer. + +00:05:57.480 --> 00:05:57.720 +Yeah. + +00:05:58.840 --> 00:05:59.440 +Okay. + +00:05:59.440 --> 00:06:00.720 +Thank you so much. + +00:06:01.120 --> 00:06:05.360 +So Shivam you want to go next? + +00:06:05.360 --> 00:06:07.840 +Yeah, sure. Hi everyone, I am Shivam. + +00:06:08.120 --> 00:06:13.120 +I lead the document +based products at mavQ India + +00:06:13.640 --> 00:06:16.600 +and I'm super excited +to be here in front of all of you. + +00:06:17.000 --> 00:06:21.440 +So the question here is how should we +train models that are capable + +00:06:21.440 --> 00:06:25.600 +of identifying aspects like identity, +emotion and appearance in personal images? + +00:06:26.000 --> 00:06:28.240 +So this is a two part answers. So + +00:06:29.560 --> 00:06:33.320 +I'm more in a technical background, +so I would go bit of technical + +00:06:33.400 --> 00:06:36.680 +diverse figures. +So preparing a data on diverse dataset. + +00:06:36.680 --> 00:06:38.200 +So that is the first point. + +00:06:38.200 --> 00:06:42.320 +So most of our available quality data, +it's sourced from a publicly available + +00:06:42.320 --> 00:06:43.480 +data. That's right. + +00:06:43.480 --> 00:06:47.400 +So we can carefully plan and prepare +the data before training our models + +00:06:47.840 --> 00:06:51.720 +to include the rates for peripheral +peripheral data surrounding environment. 
+ +00:06:51.720 --> 00:06:56.560 +Like in an image, there can a subject +and there can be a lot of peripheral data. + +00:06:57.000 --> 00:07:01.720 +So if if we train, if we do the algorithm +that take care of that data as well, + +00:07:02.480 --> 00:07:04.960 +that will be helpful +in getting a better output. + +00:07:06.160 --> 00:07:08.560 +For example, you have + +00:07:08.560 --> 00:07:11.760 +like subjects gesture, +it's relation with the environment + +00:07:11.760 --> 00:07:17.800 +and it's linking emotion to its +external manifestation on a subject area. + +00:07:18.280 --> 00:07:22.120 +Now this will give us +a more inclusive output + +00:07:22.920 --> 00:07:26.040 +if and you have a subject of the user, +a person, + +00:07:26.040 --> 00:07:29.040 +then it will give you a better emotion +identity in appearance. + +00:07:29.440 --> 00:07:33.880 +And there should be a thought +where we can have a diverse dataset. + +00:07:34.240 --> 00:07:38.200 +Not but it's only depends +on availability of data. + +00:07:38.640 --> 00:07:42.600 +Now the second part of it would be +fine-tuning your model based on + +00:07:43.240 --> 00:07:44.160 +personal preferences. + +00:07:44.160 --> 00:07:47.440 +Let's say you have a better, +bigger model, right? + +00:07:47.760 --> 00:07:51.600 +And you can use that as a general model +and then you can fine tune that on + +00:07:51.680 --> 00:07:57.160 +based on a small little on small scale +trainings and smaller datasets. + +00:07:57.160 --> 00:08:00.040 +And you can continuously find +you need to get a better result. + +00:08:00.480 --> 00:08:04.880 +Now the the fine tuning +is kind of a human in the loop feature + +00:08:05.240 --> 00:08:10.360 +where every time you get a data, +you can expect some feedback on your data + +00:08:10.360 --> 00:08:13.760 +and then perform a better output of it. + +00:08:13.760 --> 00:08:18.320 +So that's something +which is a bit of... includes + +00:08:18.360 --> 00:08:22.760 +a bit of human intervention +that yeah, that's how I see. + +00:08:22.760 --> 00:08:26.720 +How can we train models? + +00:08:26.720 --> 00:08:27.520 +Hey, thank you. + +00:08:27.520 --> 00:08:35.440 +Shivam uh, Michael. So, + +00:08:36.520 --> 00:08:37.760 +Michael Cooper, I + +00:08:37.760 --> 00:08:41.880 +work with the web accessibility +initiative and I'm speaking + +00:08:42.240 --> 00:08:45.920 +specifically from my role there, +I'm not a machine learning professional, + +00:08:45.920 --> 00:08:51.400 +so I'm not speaking about technology +so much as some considerations + +00:08:51.400 --> 00:08:53.480 +for accessibility +that I'm aware of for that. + +00:08:54.600 --> 00:08:58.360 +So in terms of improving quality +of descriptions, + +00:08:59.080 --> 00:09:03.160 +the other two speakers spoke about, +you know, technically how we do it. + +00:09:04.400 --> 00:09:06.920 +I think we might be able +to give advice on that. + +00:09:06.920 --> 00:09:11.040 +Some of what needs to be done, +for instance, machine learning + +00:09:12.120 --> 00:09:13.200 +should... it’s output + +00:09:13.200 --> 00:09:16.920 +should be able to conform to the media +accessibility user requirements + +00:09:17.840 --> 00:09:19.600 +and the cognitive accessibility guidance. + +00:09:19.600 --> 00:09:23.320 +For instance, as as sources of of + +00:09:23.600 --> 00:09:26.000 +of information +about what would be useful to users. 
+ +00:09:27.200 --> 00:09:29.240 +And I'm also thinking of + +00:09:30.480 --> 00:09:33.480 +machine learning more broadly in terms + +00:09:33.480 --> 00:09:37.280 +of what tools might be used +in these different circumstances + +00:09:37.280 --> 00:09:41.440 +and in particular contexts +as a potential assistive technology. + +00:09:42.880 --> 00:09:45.080 +And so + +00:09:46.160 --> 00:09:48.400 +the question for accessibility +there is not just + +00:09:48.400 --> 00:09:52.120 +what is the description of this image, +What is the description of this image + +00:09:52.120 --> 00:09:56.920 +in this page for me, for for the purpose +I'm seeking. + +00:09:57.640 --> 00:10:01.720 +So tools can get context +from HTML semantics, + +00:10:02.000 --> 00:10:05.320 +accessibility, semantics +like ARIA and adapt to technology. + +00:10:05.920 --> 00:10:10.840 +They can also generate their own context +from machine learning algorithms. + +00:10:10.840 --> 00:10:13.400 +But I think there is going to be a need + +00:10:13.880 --> 00:10:16.040 +to have a way to communicate + +00:10:17.040 --> 00:10:20.240 +user preferences to machine learning, +whether that is added + +00:10:20.240 --> 00:10:23.000 +to the semantics or something and + +00:10:25.640 --> 00:10:26.160 +let's see. + +00:10:26.160 --> 00:10:29.520 +So just a couple closing notes on that. + +00:10:31.000 --> 00:10:33.840 +Users need to be involved in the design +and training process. + +00:10:33.840 --> 00:10:37.120 +That's a sort of an aphorism +that needs to be repeated. + +00:10:38.560 --> 00:10:42.360 +So we have to pay attention to that +as we're looking at improving it. + +00:10:43.120 --> 00:10:46.880 +And I would also note that +while this session is mainly focused on + +00:10:47.960 --> 00:10:50.080 +images and media, + +00:10:50.080 --> 00:10:53.560 +virtual and augmented +reality has a lot of the same problems + +00:10:53.560 --> 00:11:00.160 +and solutions +that we should be looking at. + +00:11:00.160 --> 00:11:01.720 +Okay, thank you. + +00:11:01.720 --> 00:11:08.320 +to the three +for starting this discussion. + +00:11:08.320 --> 00:11:14.360 +One of the things that I guess +it was mentioned by all of you + +00:11:14.480 --> 00:11:17.760 +and different ways, it's + +00:11:17.760 --> 00:11:21.080 +the role of the end user. + +00:11:21.160 --> 00:11:24.200 +And in fact, + +00:11:24.200 --> 00:11:26.680 +I guess both + +00:11:26.680 --> 00:11:30.240 +users were mentioned, +the one that's viewing + +00:11:30.640 --> 00:11:36.360 +or acquiring the +the image or the description of the image, + +00:11:36.360 --> 00:11:41.160 +but also the one that's creating +or sharing the image. + +00:11:41.680 --> 00:11:44.640 +And and for that + +00:11:44.640 --> 00:11:49.360 +one, there is, the responsibility +of generating a description. + +00:11:49.720 --> 00:11:53.440 +And of course, +we know that most people don't do that. + +00:11:53.920 --> 00:11:57.080 +So that's why we also need this AI based + +00:11:57.080 --> 00:11:59.440 +assistance to, to, to take on that role. + +00:12:00.160 --> 00:12:05.560 +But this leads me to, +to another aspect that if we have + +00:12:06.840 --> 00:12:09.160 +an AI based system that + +00:12:09.680 --> 00:12:14.480 +is capable of assisting both the content +creator and the content consumer, + +00:12:14.480 --> 00:12:19.360 +and how can this impact +the agency of end users? 
+ +00:12:19.360 --> 00:12:23.600 +So will end users feel that +this is no longer their responsibility + +00:12:23.600 --> 00:12:27.520 +because there’s a tool +that can do this for them? + +00:12:28.600 --> 00:12:32.000 +Or if we explore this as something that + +00:12:32.080 --> 00:12:36.120 +and now looking at this from the +the content producer perspective, + +00:12:36.640 --> 00:12:41.360 +if we if we see these tools +as something that helps someone generating + +00:12:41.360 --> 00:12:44.960 +a description, would this + +00:12:45.560 --> 00:12:49.520 +producer just start relying on the on the + +00:12:49.880 --> 00:12:52.560 +the output from the AI and here thinking + +00:12:52.560 --> 00:12:55.240 +about what Jutta was + +00:12:55.600 --> 00:12:58.640 +introducing earlier today wouldn't + +00:12:59.440 --> 00:13:02.480 +and she mentioned this +as an organizational monoculture + +00:13:02.480 --> 00:13:05.960 +but can we also think about +the description monoculture + +00:13:05.960 --> 00:13:08.800 +in which all descriptions would start + +00:13:09.560 --> 00:13:12.160 +conveying the same kind of information. So + +00:13:13.880 --> 00:13:15.760 +what are your perspectives on + +00:13:15.760 --> 00:13:20.080 +this, on the impact +that this has on the agency of end users? + +00:13:20.120 --> 00:13:23.640 +And I'll start with you Shivam now. + +00:13:23.640 --> 00:13:24.760 +Awesome, awesome. + +00:13:24.760 --> 00:13:27.440 +So it is a quite good of a question. + +00:13:27.440 --> 00:13:33.000 +So let's say we are basically talking +about the quality of our output + +00:13:33.000 --> 00:13:35.800 +based on a user, +the end user, the agency of end user. + +00:13:36.160 --> 00:13:41.120 +Now the quality of these descriptions +caption depend on how end users consume it. + +00:13:41.120 --> 00:13:44.440 +For example, most of the models +currently provide high level + +00:13:44.440 --> 00:13:47.080 +and grammatically correct +caption in English, + +00:13:47.560 --> 00:13:51.040 +but that would not be true for captions +generated in a native language of end user + +00:13:51.200 --> 00:13:54.920 +because there might not +be enough dataset to train + +00:13:54.920 --> 00:13:55.960 +our model. + +00:13:55.960 --> 00:13:59.760 +Now on the premise of training restricts +this diversity of generated captions + +00:14:00.600 --> 00:14:04.600 +and the use cases of what all things +an AI model can comprehend. + +00:14:04.920 --> 00:14:07.120 +And then the caption + +00:14:07.120 --> 00:14:11.320 +which includes like a diverse text, +like an email, a date or + +00:14:11.360 --> 00:14:14.880 +correctly explaining graphs, +which has been really a very big problem + +00:14:15.360 --> 00:14:19.400 +until now and once any translational AI + +00:14:19.520 --> 00:14:23.480 +is employed, how well it becomes an input, +it often takes more. + +00:14:23.480 --> 00:14:27.400 +So for example, you can have two different +models, one specialized in one of them + +00:14:27.680 --> 00:14:28.840 +and one general. + +00:14:28.840 --> 00:14:32.480 +Now your general output of a model +can become an input + +00:14:32.480 --> 00:14:35.000 +for any specialized model +and then you can refine it. + +00:14:35.000 --> 00:14:37.560 +This is how now we are achieving it. + +00:14:38.640 --> 00:14:39.800 +That the thing is + +00:14:39.800 --> 00:14:44.080 +the caption denoting AI consumes very large +amount of data to curate content. + +00:14:44.080 --> 00:14:46.920 +And then in many cases of live +caption generation. 
+ +00:14:47.560 --> 00:14:51.400 +AI should put in context +the earlier events or early input as well. + +00:14:51.400 --> 00:14:54.800 +Now this is true for a context +of a conversational part, + +00:14:54.800 --> 00:15:00.000 +but this can also be thought to where you have +a live caption generation. + +00:15:00.440 --> 00:15:03.960 +So you have to put some context there and +then you have to generate the captions. + +00:15:04.360 --> 00:15:07.320 +Now we have mature +Indian's legibility, right? + +00:15:07.560 --> 00:15:09.640 +But this is more complex +than a simple image to text caption + +00:15:09.640 --> 00:15:13.800 +generation, the speed, +the attention, the handling of peripheral data + +00:15:13.840 --> 00:15:15.360 +is very much necessary + +00:15:15.360 --> 00:15:17.120 +and we have these great partnership +interpreting + +00:15:17.120 --> 00:15:20.280 +and we are looking forward +that we will have a better solution where + +00:15:20.280 --> 00:15:25.480 +end users are really satisfied +with what they're getting. + +00:15:25.480 --> 00:15:26.040 +Thanks, + +00:15:27.200 --> 00:15:28.120 +Michael. + +00:15:28.200 --> 00:15:30.440 +What about the perspective from + +00:15:32.120 --> 00:15:34.760 +the end user +or the agency of end users + +00:15:34.760 --> 00:15:37.160 +from your point of view from + +00:15:37.800 --> 00:15:40.480 +I guess the more the + +00:15:41.640 --> 00:15:45.400 +the Web accessibility initiative +and that role in + +00:15:45.400 --> 00:15:47.400 +how how can we + +00:15:49.240 --> 00:15:50.360 +guide + +00:15:51.240 --> 00:15:53.000 +technical creators to + +00:15:53.000 --> 00:15:55.880 +to ensure that end users remain with + +00:15:57.280 --> 00:15:59.480 +autonomy to to + +00:15:59.920 --> 00:16:02.680 +when creating +this kind of content. + +00:16:05.280 --> 00:16:11.440 +So you know first I would + +00:16:12.080 --> 00:16:15.800 +you know, look at you know, +what are the ways in which, + +00:16:16.640 --> 00:16:21.760 +you know, machine learning +generated descriptions and captions + +00:16:21.760 --> 00:16:28.000 +increase user agency and then there's ways +that they decrease it as well. + +00:16:28.000 --> 00:16:31.080 +So you know, for instance, + +00:16:31.080 --> 00:16:32.520 +although + +00:16:33.680 --> 00:16:35.800 +we would prefer that authors provide + +00:16:35.800 --> 00:16:39.720 +these these features, +if they don't, providing them + +00:16:39.720 --> 00:16:43.480 +via machine learning +will help the user access the page + +00:16:44.200 --> 00:16:47.960 +and, you know, give them the agency +they were looking for in their task. + +00:16:49.480 --> 00:16:50.280 +You know, the + +00:16:50.280 --> 00:16:54.040 +you know, the descriptions don't +have to be perfect to provide that agency. + +00:16:54.800 --> 00:16:58.600 +That said, it's frustrating +when they're not good enough. + +00:16:58.600 --> 00:17:00.640 +They can often mislead users + +00:17:02.200 --> 00:17:03.720 +and cause them + +00:17:03.720 --> 00:17:07.520 +to not get what they were looking +for, spend time, etc. + +00:17:08.960 --> 00:17:10.560 +So, you know, that's + +00:17:10.560 --> 00:17:13.080 +a way that this can be a risk for users. 
+ +00:17:13.840 --> 00:17:16.640 +And, you know, as you mentioned, + +00:17:16.640 --> 00:17:21.520 +there's likely to be a tendency +for content developers to say, + +00:17:21.520 --> 00:17:23.920 +well, machine descriptions are there, so + +00:17:24.920 --> 00:17:27.920 +we don't need to worry about it + +00:17:29.080 --> 00:17:30.720 +now. So, you know, + +00:17:30.720 --> 00:17:34.560 +I think those are simply considerations +that we + +00:17:34.960 --> 00:17:38.080 +you'll have to pay attention + +00:17:38.080 --> 00:17:40.960 +to in our advocacy + +00:17:41.880 --> 00:17:43.960 +in education work in the field + +00:17:44.560 --> 00:17:47.920 +also in documenting + +00:17:49.080 --> 00:17:51.360 +best practices for machine learning. + +00:17:51.920 --> 00:17:56.920 +For instance, the W3C has a publication +called Ethical Principles + +00:17:57.280 --> 00:18:01.720 +for Web Machine Learning that, +you know, you know, talk about + +00:18:01.880 --> 00:18:05.320 +they address accessibility +considerations, among others. + +00:18:06.320 --> 00:18:07.560 +And, you know, it's + +00:18:07.560 --> 00:18:12.000 +possible that, you know, +the industry might want + +00:18:12.040 --> 00:18:17.600 +a documented set of ethical principles +or code of contact + +00:18:18.280 --> 00:18:22.040 +conduct that industry organizations +sign on to saying here's + +00:18:23.200 --> 00:18:26.600 +here's accessibility ethics in machine +learning that the + +00:18:26.720 --> 00:18:30.080 +you know, in addition to other ethics +we are paying attention to. + +00:18:30.360 --> 00:18:34.880 +So those could be ways that we can support +the growth of user agency in the end, + +00:18:34.880 --> 00:18:39.360 +the end of this, yeah. + +00:18:39.360 --> 00:18:40.200 +Thanks. + +00:18:40.200 --> 00:18:44.560 +Thank you for that perspective +and for raising awareness + +00:18:44.760 --> 00:18:46.200 +to that kind of information. + +00:18:46.200 --> 00:18:49.000 +That's the WAI group is + +00:18:50.240 --> 00:18:51.960 +is making available. + +00:18:51.960 --> 00:18:55.120 +I think that's that's really important +for everyone else to know. + +00:18:55.880 --> 00:18:59.240 +So, Amy, what's your take on this, + +00:18:59.760 --> 00:19:04.360 +the impact that these tools +can have on the agency of end users? + +00:19:05.280 --> 00:19:05.600 +Yeah. + +00:19:05.600 --> 00:19:10.360 +So I might answer this briefly +from the sort of content creator side. + +00:19:10.360 --> 00:19:12.760 +So say you are out to make a description. + +00:19:12.760 --> 00:19:14.440 +How could we use A.I. + +00:19:14.440 --> 00:19:16.560 +to improve the description? + +00:19:16.600 --> 00:19:19.720 +Improve your quality of descriptions +and the efficiency + +00:19:20.120 --> 00:19:22.440 +rather than sacrificing one for the other? + +00:19:22.960 --> 00:19:24.520 +So one. I'll start with what? + +00:19:24.520 --> 00:19:25.760 +Like I worked on tools + +00:19:25.760 --> 00:19:29.520 +a lot in this space, and so I'll kind +start with what hasn't worked in the past + +00:19:29.720 --> 00:19:32.920 +and then share some possibilities +on things that work a little bit better. + +00:19:33.760 --> 00:19:36.880 +So one thing that I've worked on for quite +a while + +00:19:36.880 --> 00:19:40.960 +has been creating user +generated descriptions of videos. 
+ +00:19:41.680 --> 00:19:46.800 +Video descriptions currently appear mostly +in highly produced TV and film, + +00:19:46.800 --> 00:19:48.760 +and they're quite difficult + +00:19:48.760 --> 00:19:51.080 +to produce yourself +because they're sort of an art form. + +00:19:51.080 --> 00:19:54.640 +You have to fit these descriptions +within the dialog. + +00:19:54.760 --> 00:19:56.720 +They're they're really hard to make. + +00:19:56.720 --> 00:20:00.080 +So one thing we worked on +was some tools to make it easier + +00:20:00.080 --> 00:20:04.160 +for people to create video descriptions +by using A.I.. + +00:20:04.560 --> 00:20:08.960 +So what didn't work was automatically +generating these descriptions. + +00:20:09.160 --> 00:20:12.520 +The descriptions were often uninteresting +and they didn't + +00:20:12.520 --> 00:20:15.520 +provide quite the depth of... that + +00:20:15.520 --> 00:20:18.520 +the original content creator +had included in their visual, + +00:20:18.640 --> 00:20:20.160 +in the visual information in the scene. + +00:20:20.160 --> 00:20:21.680 +So if the scene was really simple, + +00:20:21.680 --> 00:20:23.680 +like just a house and a tree, +sure it might get it. + +00:20:24.240 --> 00:20:29.280 +But if it was something +that was domain specific or had something + +00:20:29.280 --> 00:20:32.280 +extra to it that you might want to share, +it was completely missing. + +00:20:32.520 --> 00:20:34.520 +And so one thing we looked at is +how we could + +00:20:34.520 --> 00:20:38.840 +identify areas where people could add +descriptions or silences or + +00:20:38.840 --> 00:20:42.120 +how we could identify things that weren't +already described in the narration. + +00:20:42.280 --> 00:20:46.600 +So at this point, +the narration of the video talks about + +00:20:47.360 --> 00:20:50.320 +is talking about something completely +unrelated to the visual content. + +00:20:50.440 --> 00:20:52.800 +So people might be missing out +on that visual content. + +00:20:53.040 --> 00:20:56.040 +So rather than trying to like, +automatically generate descriptions, + +00:20:56.040 --> 00:21:00.840 +I think promising approach +can be to identify places + +00:21:00.840 --> 00:21:05.800 +where people could put in descriptions +or if they write a description, identify + +00:21:06.040 --> 00:21:08.840 +parts of the image +that that description doesn't cover yet. + +00:21:09.040 --> 00:21:12.280 +So I think there's kind +of some cool opportunities to use + +00:21:12.400 --> 00:21:17.040 +AI in kind of unexpected ways +to help people create better descriptions. + +00:21:17.040 --> 00:21:19.840 +And then I'll briefly +address the end user part. + +00:21:20.960 --> 00:21:22.880 +You know, if if the user's lacking. + +00:21:22.880 --> 00:21:26.080 +And so the person using the captions +or the descriptions, + +00:21:26.080 --> 00:21:30.040 +if they're lacking information +that can decrease their ability + +00:21:30.040 --> 00:21:32.880 +to have agency +and responding to that information. + +00:21:32.880 --> 00:21:33.280 +Right. + +00:21:33.280 --> 00:21:35.400 +But if you give them +all of the information + +00:21:35.400 --> 00:21:39.080 +you in one big piece of alt text, +then you might not be giving people + +00:21:39.080 --> 00:21:41.360 +much agency over +what they're what they're hearing. + +00:21:41.360 --> 00:21:43.440 +You're probably not matching +with the cognitive + +00:21:43.440 --> 00:21:46.480 +accessibility guidelines +that Michael... Michael mentioned. 
+ +00:21:47.200 --> 00:21:50.200 +And so I've experimented with some ways +to try to like + +00:21:51.240 --> 00:21:55.840 +maybe help people use get agency over +automated descriptions. + +00:21:55.840 --> 00:22:00.120 +The one thing we've played +with a little bit is, you know, asking + +00:22:00.640 --> 00:22:03.200 +basically alerting people to the fact +that there's a mismatch + +00:22:03.200 --> 00:22:04.720 +between the audio and visual. + +00:22:04.720 --> 00:22:07.040 +For instance, in listening to a lecture, + +00:22:07.040 --> 00:22:11.080 +hey, the lecturer hasn't talked about this +piece of text that's on their slide. + +00:22:11.480 --> 00:22:12.640 +Would you like to hear more about it? + +00:22:12.640 --> 00:22:15.720 +And then people can optionally hear +a little bit more about it. + +00:22:15.880 --> 00:22:18.840 +And that's, you know, something like OCR, +which automatically detects + +00:22:18.840 --> 00:22:20.120 +text, works quite well. + +00:22:20.120 --> 00:22:23.280 +So I think there's these opportunities +that you don't want to overwhelm people + +00:22:23.280 --> 00:22:25.800 +with information when they're doing a task +that's not related. + +00:22:25.800 --> 00:22:27.440 +But there are some cool opportunities, + +00:22:27.440 --> 00:22:31.240 +I think, to like give people control over +when they get more information. + +00:22:31.240 --> 00:22:36.520 +Yeah, Okay. + +00:22:37.240 --> 00:22:39.680 +Just and thanks for that, Amy. Also, + +00:22:41.320 --> 00:22:44.200 +just before moving to the next question + +00:22:44.200 --> 00:22:47.880 +that I had here, Matt Campbell + +00:22:49.120 --> 00:22:51.680 +asked a follow up question on this. + +00:22:52.240 --> 00:22:57.480 +So and it's about what you just mentioned, +Michael So you mentioned + +00:22:57.480 --> 00:23:01.640 +that descriptions not being good +enough are a risk for user agency. + +00:23:01.920 --> 00:23:06.240 +And what Matt's inquiring is +how much can this be + +00:23:06.240 --> 00:23:10.960 +mitigated by just tagging the descriptions +as automatically generated? + +00:23:10.960 --> 00:23:13.720 +So to + +00:23:15.480 --> 00:23:18.160 +give a perspective on this and also Amy + +00:23:18.160 --> 00:23:22.120 +if you if you want to make. + +00:23:22.120 --> 00:23:24.280 +A try to give a quick answer. + +00:23:25.360 --> 00:23:30.520 +So is the the ARIA technology, +accessible rich Internet applications + +00:23:30.520 --> 00:23:37.040 +technology, enhances HTML +with the ability to point to a description + +00:23:37.040 --> 00:23:40.880 +elsewhere in the HTML document +rather than providing a simple alt text + +00:23:41.200 --> 00:23:45.080 +And that gives you +the rich HTML capability. + +00:23:45.680 --> 00:23:49.840 +So so we have that now +in terms of identifying it as a machine + +00:23:50.320 --> 00:23:53.840 +generated description, +we don't have a semantic for that, + +00:23:53.840 --> 00:23:57.240 +but you know, that's the sort of thing +that would get added to ARIA + +00:23:57.240 --> 00:24:01.640 +if the use cases were emerging. + +00:24:01.640 --> 00:24:02.080 +Yeah. + +00:24:02.080 --> 00:24:06.280 +So I will also I'm happy +to also answer this question. + +00:24:06.280 --> 00:24:09.920 +Well maybe I was looking at maps +other question which is kind of related. + +00:24:09.920 --> 00:24:10.600 +I think so. + +00:24:10.600 --> 00:24:14.240 +Are there other alternatives +that are richer than alt text alone? 
+ +00:24:15.000 --> 00:24:17.160 +One thing we've looked +at a little bit for, + +00:24:18.000 --> 00:24:21.240 +I've worked a little bit +on the accessibility of complex scientific + +00:24:21.240 --> 00:24:25.400 +images, and what you end up with +are these like complex multipart diagrams + +00:24:25.400 --> 00:24:28.240 +that if you try to describe in +like one single, + +00:24:29.080 --> 00:24:31.760 +you know, alt text field, it performs +quite badly. + +00:24:31.760 --> 00:24:35.200 +So we're kind of starting to see like, +oh, could we automatically + +00:24:35.800 --> 00:24:39.080 +break that big piece +of alt text down into a hierarchy + +00:24:39.480 --> 00:24:42.160 +to match the image +so that maybe people can more flexibly + +00:24:42.960 --> 00:24:45.640 +explore +like they would basically an HTML version + +00:24:46.360 --> 00:24:49.640 +that sort of captures the structure +of the image that people could explore. + +00:24:49.640 --> 00:24:50.760 +So kind of trying + +00:24:50.760 --> 00:24:54.400 +to think about some other ways to present +all the information that currently gets + +00:24:54.400 --> 00:24:58.360 +relegated sometimes to a single alt text +into something that's a little more rich. + +00:24:58.360 --> 00:25:05.680 +Yeah. + +00:25:05.680 --> 00:25:08.440 +Carlos, you're on mute. Sorry. Thanks. + +00:25:09.760 --> 00:25:12.080 +Uh, and + +00:25:12.080 --> 00:25:15.280 +what I was +saying is that since we have been coming + +00:25:15.280 --> 00:25:20.200 +always around to the topic of +or to the concept of quality, + +00:25:21.040 --> 00:25:24.040 +also, when questioned by Mark, + +00:25:24.400 --> 00:25:26.440 +Mark Urban I think it's + +00:25:27.120 --> 00:25:29.760 +it would be rather interesting to know + +00:25:29.760 --> 00:25:32.560 +what's your take on this. So + +00:25:33.080 --> 00:25:38.440 +is there a documented metric that measures +the quality of an image description? + +00:25:38.800 --> 00:25:41.680 +And if if there is so, what? + +00:25:42.280 --> 00:25:48.160 +What would be the most important +priorities for defining quality? + +00:25:49.080 --> 00:25:52.760 +Amy, you want to go first? + +00:25:52.760 --> 00:25:55.480 +This is a hard question for me +because I think the answer is no. + +00:25:55.960 --> 00:25:59.680 +But it's really it's a really good it's +a really good question + +00:25:59.680 --> 00:26:03.520 +and something that we constantly +sort of battle with. + +00:26:04.120 --> 00:26:07.760 +So we kind of abuse in our work, +you know, a four point description + +00:26:07.760 --> 00:26:11.720 +that's like no description, like literally +nothing, you know, one that's like + +00:26:11.720 --> 00:26:14.800 +there's something in the description +field, but it's in no way related. + +00:26:15.400 --> 00:26:19.000 +There is something related to the image, +but it's missing some key points. + +00:26:19.000 --> 00:26:22.000 +And this covers most of the key points +in the image and we've kind of been + +00:26:22.000 --> 00:26:27.480 +using this, but the what those values +mean depends a lot on the domain and what + +00:26:28.640 --> 00:26:30.840 +what task +the person is using the image for. + +00:26:30.840 --> 00:26:33.960 +But it's been like... you know we've +we've used this in a couple of papers + +00:26:33.960 --> 00:26:38.120 +and it's just been like a way for us to, +you know, make progress on this problem. 
+
+00:26:38.120 --> 00:26:41.600
+And we've also tried to, for each domain
+we're working in, kind of tried to inform
+
+00:26:41.600 --> 00:26:44.640
+it based on existing guidelines
+as well as like, you know,
+
+00:26:44.640 --> 00:26:48.400
+literally the existing W3C guidelines
+as well as like
+
+00:26:48.400 --> 00:26:51.720
+what users
+have told us about specific to that domain.
+
+00:26:51.880 --> 00:26:53.200
+But I don't know of a good one.
+
+00:26:53.200 --> 00:26:54.600
+And that's something that like
+
+00:26:54.600 --> 00:26:58.360
+we just sort of worked around,
+but I think it would be great to have more
+
+00:26:58.600 --> 00:27:00.520
+efforts on that in the future.
+
+00:27:00.520 --> 00:27:05.320
+Yeah, definitely something that's
+been more qualitative than quantitative.
+
+00:27:05.320 --> 00:27:06.040
+Definitely.
+
+00:27:06.040 --> 00:27:08.720
+That's what you just described.
+
+00:27:08.720 --> 00:27:10.000
+It's a good way to start.
+
+00:27:10.000 --> 00:27:14.280
+So Shivam, your take
+on the quality of image descriptions?
+
+00:27:15.160 --> 00:27:19.360
+Sure,
+so I guess when we come to an industry
+
+00:27:19.760 --> 00:27:22.440
+setup, right,
+we have certain evaluation tools.
+
+00:27:23.360 --> 00:27:26.840
+We evaluate our models
+as well as some of the outputs, there’s
+
+00:27:26.840 --> 00:27:28.760
+a rigorous testing that goes on.
+
+00:27:28.760 --> 00:27:32.200
+But there's no set of metrics
+that we have.
+
+00:27:32.640 --> 00:27:36.440
+But certainly we have some rules,
+we have the W3C guidelines, we have
+
+00:27:36.920 --> 00:27:39.720
+we have some other guidelines as well
+that are in place.
+
+00:27:40.440 --> 00:27:41.480
+There are not set rules,
+
+00:27:41.480 --> 00:27:47.120
+but yeah, we have those as a yardstick
+and we can really test based on that only.
+
+00:27:47.400 --> 00:27:49.400
+So there can be some work done there.
+
+00:27:49.400 --> 00:27:54.880
+But yeah, certainly
+this is what we have currently.
+
+00:27:54.880 --> 00:27:56.680
+Okay. Okay. Yeah. And Michael,
+
+00:27:57.680 --> 00:28:01.000
+hey, Amy just mentioned in her answer
+
+00:28:01.400 --> 00:28:06.040
+looking also at the definitions
+that the W3C provided... they’re provided.
+
+00:28:06.040 --> 00:28:08.200
+So do you want to add something on
+
+00:28:08.200 --> 00:28:13.240
+how we can measure
+quality of image descriptions?
+
+00:28:13.240 --> 00:28:16.440
+The only thing I would really add to
+what she said is... so
+
+00:28:16.560 --> 00:28:19.960
+we produce resources
+like Understanding WCAG,
+
+00:28:21.080 --> 00:28:23.440
+understanding the Web Content
+Accessibility Guidelines, which
+
+00:28:24.440 --> 00:28:28.280
+go into, when you're writing image
+descriptions, what are the considerations?
+
+00:28:28.280 --> 00:28:30.640
+How would you make a good one?
+
+00:28:30.720 --> 00:28:33.320
+And one of the big challenges
+I think for machine
+
+00:28:33.320 --> 00:28:37.480
+learning in particular
+is that the quality,
+
+00:28:38.120 --> 00:28:41.880
+the appropriate description for an image
+will depend very much on its context.
+
+00:28:42.360 --> 00:28:45.400
+We describe several different contexts
+in the guide,
+
+00:28:45.400 --> 00:28:49.520
+in the support materials,
+and yeah, those are...
+
+00:28:49.720 --> 00:28:53.320
+the right description for one
+is the wrong one for another.
+
+00:28:53.320 --> 00:28:56.440
+So sorting that out I think is going to be
+one of the big challenges
+
+00:28:56.680 --> 00:29:00.280
+beyond what others have said.
+
+00:29:00.280 --> 00:29:01.480
+Yeah, definitely.
+
+00:29:01.480 --> 00:29:05.840
+I have to agree with you.
+Apparently we're losing
+
+00:29:06.400 --> 00:29:09.960
+Shivam intermittently,
+but okay, he is back on.
+
+00:29:10.520 --> 00:29:10.840
+Okay.
+
+00:29:10.840 --> 00:29:14.800
+And I'm going to combine
+two questions
+
+00:29:14.800 --> 00:29:17.480
+that we have here in the Q&A,
+
+00:29:18.240 --> 00:29:21.160
+the one from Jan Benjamin
+
+00:29:21.160 --> 00:29:23.440
+and the other one from Wilco Fiers.
+
+00:29:23.440 --> 00:29:26.480
+So this is, it's more about
+
+00:29:28.800 --> 00:29:30.760
+qualifying images
+
+00:29:30.760 --> 00:29:34.280
+than really generating
+descriptions for the image.
+
+00:29:34.680 --> 00:29:36.760
+So Jan asks
+
+00:29:38.320 --> 00:29:40.880
+can AI differentiate between,
+
+00:29:41.320 --> 00:29:44.080
+for example, functional
+and decorative images,
+
+00:29:44.080 --> 00:29:47.680
+instead of generating a description,
+just differentiating between
+
+00:29:49.160 --> 00:29:51.800
+an image that needs a description
+and one that doesn't?
+
+00:29:51.800 --> 00:29:54.400
+And Wilco asks
+
+00:29:55.400 --> 00:29:59.680
+if it's viable to spot images
+where automated captions
+
+00:29:59.920 --> 00:30:04.920
+will likely be sufficient so that content
+authors can focus on those
+
+00:30:04.960 --> 00:30:07.400
+and leave the AI
+
+00:30:07.960 --> 00:30:11.520
+to caption, to describe, others
+that might be easier for them.
+
+00:30:11.520 --> 00:30:15.520
+So, Amy, would you go first?
+
+00:30:16.440 --> 00:30:17.760
+Sure. Yeah.
+
+00:30:17.760 --> 00:30:19.880
+So I love both of these questions.
+
+00:30:20.400 --> 00:30:24.040
+So I would say to Jan's question,
+I don't think...
+
+00:30:24.040 --> 00:30:27.640
+you know, I guess
+when the question is, can AI do this?
+
+00:30:27.880 --> 00:30:32.320
+You know, we've tried this
+a little bit, for slide presentations.
+
+00:30:32.320 --> 00:30:33.640
+And the answer is yes,
+
+00:30:33.640 --> 00:30:35.640
+to some extent;
+it's going to fail some places.
+
+00:30:35.640 --> 00:30:38.200
+But just to give
+you kind of an idea of how,
+
+00:30:38.760 --> 00:30:42.440
+you know,
+AI could maybe help detect decorative
+
+00:30:42.440 --> 00:30:44.680
+from non-decorative,
+from more informative images,
+
+00:30:44.800 --> 00:30:47.720
+like in the context of a slide
+presentation: you know,
+
+00:30:47.720 --> 00:30:51.520
+informative images might be more complex,
+they might be more related
+
+00:30:51.520 --> 00:30:56.080
+to the content on the rest of the slide
+and in the narration. Informative images,
+
+00:30:56.720 --> 00:31:00.560
+they might be larger on the screen,
+whereas decorative images in slides
+
+00:31:01.000 --> 00:31:04.720
+might be like, you know,
+like little decorations on the side.
+
+00:31:04.720 --> 00:31:08.680
+They might be logos or, like, emojis,
+or less
+
+00:31:08.680 --> 00:31:12.240
+related
+to the content on the screen.
+
+00:31:12.240 --> 00:31:13.360
+So what we found out is
+
+00:31:13.360 --> 00:31:17.160
+we can do a decent job at this,
+but it will fail in some cases.
+ +00:31:17.160 --> 00:31:17.720 +Always like, + +00:31:17.720 --> 00:31:21.320 +you know, maybe an image is included, +but there's no other information about it. + +00:31:21.680 --> 00:31:23.720 +And and so it's it's tricky. + +00:31:23.720 --> 00:31:27.080 +I think in doing this, +you would want to be overly inclusive + +00:31:27.080 --> 00:31:29.440 +of the images +that you identified as informative. + +00:31:30.280 --> 00:31:33.760 +So so that maybe you could help content +authors make sure that they at least + +00:31:33.760 --> 00:31:35.160 +review most of the images. + +00:31:36.400 --> 00:31:37.120 +And then I would say + +00:31:37.120 --> 00:31:41.040 +to Wilco, +I yeah, I think that's a great idea. + +00:31:41.040 --> 00:31:43.040 +We've tried it a little bit on Twitter. + +00:31:43.040 --> 00:31:46.120 +So one time we ran +basically a bunch of different + +00:31:47.280 --> 00:31:49.360 +AI methods +to try to describe images on Twitter. + +00:31:49.880 --> 00:31:53.680 +And so for each image +we would try to run captioning OCR + +00:31:54.360 --> 00:31:57.800 +we did like this URL tracing to see +if we could find a caption elsewhere + +00:31:57.800 --> 00:32:02.480 +on the web and basically +if all of those had like low confidence or + +00:32:03.200 --> 00:32:06.760 +or they didn't return anything, +then we kind of automatically sent + +00:32:07.000 --> 00:32:11.800 +the image for it to get more human +written descriptions and another thing + +00:32:11.800 --> 00:32:15.760 +we explored with is like users optionally +like retrieving that description. + +00:32:15.760 --> 00:32:16.840 +So I think it's possible. + +00:32:16.840 --> 00:32:19.960 +I think that the like +the subtleties, there's subtleties there + +00:32:19.960 --> 00:32:21.680 +that would be really difficult +to do automatically. + +00:32:21.680 --> 00:32:25.360 +But, but at least that was the way, given +how many images were on Twitter without + +00:32:25.680 --> 00:32:28.360 +description, that was sort of a way +for us to filter out the ones + +00:32:28.360 --> 00:32:31.360 +where we definitely needed +to get more information from a human. + +00:32:31.360 --> 00:32:33.760 +Yeah, great. + +00:32:34.080 --> 00:32:37.800 +Thanks for sharing +those experiences. Shivam... + +00:32:39.800 --> 00:32:41.600 +Yeah, I guess + +00:32:41.600 --> 00:32:44.760 +I have had an encounter with this scenario +where + +00:32:45.920 --> 00:32:48.040 +I had to get descriptions of images that + +00:32:48.040 --> 00:32:51.760 +most likely not get very sufficient +on a machine description. + +00:32:52.080 --> 00:32:55.680 +So there are always +there are tools that can do that for you + +00:32:55.680 --> 00:32:57.440 +there on websites. + +00:32:57.440 --> 00:32:59.960 +I think there are multiple plugins +that you can use. + +00:33:00.360 --> 00:33:04.320 +You can get certain descriptions +and people can put certain human descriptions + +00:33:04.320 --> 00:33:05.080 +out there + +00:33:05.920 --> 00:33:09.040 +to mark them, +to spot them in a scalable manner. + +00:33:09.040 --> 00:33:12.080 +It sometimes doesn't become scalable +and that's the whole issue. + +00:33:12.320 --> 00:33:13.280 +You can have a tool. + +00:33:13.280 --> 00:33:17.120 +It might not be scalable for every user +out there, every website out there. + +00:33:17.120 --> 00:33:19.600 +So this can be done. + +00:33:19.600 --> 00:33:23.400 +But yeah, again, there are some things +that where it can used it can't. 
+ +00:33:24.400 --> 00:33:27.520 +So there's certainly +this technology is the answer. How to scale + +00:33:27.520 --> 00:33:30.760 +it is the question. + +00:33:31.360 --> 00:33:32.560 +Great, thanks Shivam. + +00:33:34.120 --> 00:33:36.480 +Michael do you have any input on this. + +00:33:37.280 --> 00:33:38.920 +No not on this one. Yeah. + +00:33:38.920 --> 00:33:40.000 +Okay. + +00:33:40.000 --> 00:33:46.440 +Um, that, that takes me back +to one question that I had here. + +00:33:46.440 --> 00:33:50.000 +Uh, and, uh, I think these opportunities + +00:33:50.320 --> 00:33:52.840 +to go back there +and I will start with you, Michael. + +00:33:53.400 --> 00:33:55.520 +Uh, it's, uh, + +00:33:56.080 --> 00:33:59.400 +going in a different direction +from what we have been going so far, + +00:33:59.800 --> 00:34:02.000 +but How do you think that + +00:34:03.000 --> 00:34:05.200 +we need to deal with + +00:34:05.440 --> 00:34:08.920 +legal copyright and responsibility issues + +00:34:08.920 --> 00:34:11.720 +when generating descriptions + +00:34:12.120 --> 00:34:16.560 +with AI-based models? + +00:34:16.840 --> 00:34:21.560 +How do we tackle that? + +00:34:21.560 --> 00:34:22.520 +Yeah. + +00:34:22.800 --> 00:34:23.200 +Okay. + +00:34:23.200 --> 00:34:26.440 +So, you know, also, we're not speaking + +00:34:26.440 --> 00:34:29.320 +as a legal professional, +but the issues that I know + +00:34:30.880 --> 00:34:36.160 +in general, at least for accessibility, +there is often a fair use. + +00:34:36.160 --> 00:34:38.080 +The right to transform content. + +00:34:38.080 --> 00:34:43.000 +But to circle back to that, + +00:34:43.000 --> 00:34:47.080 +you know, so you know our priority +but that's my first answer. + +00:34:47.080 --> 00:34:49.080 +But then there are issues around accuracy. + +00:34:50.800 --> 00:34:52.600 +So, you know, if + +00:34:52.600 --> 00:34:55.520 +a machine has generated a caption + +00:34:55.640 --> 00:34:59.480 +or description, you know, how accurate +is that description? + +00:34:59.520 --> 00:35:01.840 +Who knows how accurate it is? + +00:35:01.840 --> 00:35:05.920 +You know, and also publishing it, +especially with potential inaccuracies, + +00:35:06.320 --> 00:35:08.760 +can bring on, +you know, liability consequences, + +00:35:09.040 --> 00:35:12.040 +even if very useful as otherwise. + +00:35:12.040 --> 00:35:15.160 +Allowing that publication is. + +00:35:15.160 --> 00:35:18.320 +So another challenge is + +00:35:19.520 --> 00:35:21.080 +meeting requirements. + +00:35:21.080 --> 00:35:26.280 +You know, if the accuracy is high, +pretty high, but still not quite right. + +00:35:26.280 --> 00:35:29.600 +If it's a legal document, +it might not be sufficient. + +00:35:29.880 --> 00:35:34.360 +So either, depending on the accuracy +of these kinds of descriptions, + +00:35:34.360 --> 00:35:35.560 +is going to be a big, + +00:35:35.560 --> 00:35:38.560 +you know, legal challenge, I think, +from a bunch of different directions. + +00:35:39.880 --> 00:35:42.920 +You know, of course there is the benefit, +the reason to do it, + +00:35:43.120 --> 00:35:46.360 +and this can still be better than nothing +for many users, + +00:35:47.000 --> 00:35:50.080 +you know, +who get used to some of the inaccuracies. + +00:35:50.720 --> 00:35:52.760 +And it does provide scalability, + +00:35:53.120 --> 00:35:57.000 +you know, you know, given how image +and video focused our web has become. 
+ +00:35:58.600 --> 00:35:59.880 +So I would + +00:35:59.880 --> 00:36:00.680 +highlight one of + +00:36:00.680 --> 00:36:04.520 +the ethical principles from the other goal +machine learning document, which is that + +00:36:04.520 --> 00:36:08.680 +it should be clear that machine, +the content is machine generated + +00:36:09.040 --> 00:36:12.320 +that allows many actors to evaluate, + +00:36:13.280 --> 00:36:17.760 +evaluate it and then, +you know, circling back to fair use, + +00:36:18.400 --> 00:36:22.960 +I think who is doing the generating +or publishing + +00:36:23.160 --> 00:36:27.920 +of of machine learning content +will probably impact that. If it's a user + +00:36:27.920 --> 00:36:31.960 +agent and assistive technology +probably is covered by fair use. + +00:36:32.800 --> 00:36:35.400 +And if the content producer is doing it, + +00:36:36.200 --> 00:36:40.920 +you know, they probably are declaring fair +use for themselves. + +00:36:41.320 --> 00:36:45.400 +But the responsibility for accuracy +will be higher for them + +00:36:46.000 --> 00:36:48.400 +because they are now the publisher. + +00:36:49.040 --> 00:36:52.480 +And then there are, +you know, third party agents of various + +00:36:52.480 --> 00:36:54.880 +sorts accessibility remediation tools, + +00:36:56.440 --> 00:36:58.720 +other other sorts + +00:36:58.720 --> 00:37:04.640 +where I assume it's a legal Wild West. + +00:37:04.640 --> 00:37:05.720 +Yeah, definitely. + +00:37:05.720 --> 00:37:09.960 +And to make it worse, +I guess there are many Wild West + +00:37:09.960 --> 00:37:14.080 +because every every country, +every region might have different + +00:37:14.240 --> 00:37:15.600 +legal constraints there. + +00:37:16.600 --> 00:37:17.480 +Shivam, + +00:37:18.120 --> 00:37:19.720 +any take on this? + +00:37:19.960 --> 00:37:20.560 +Yeah. + +00:37:20.560 --> 00:37:23.960 +So I have a holistic view +of how technical this has been. + +00:37:24.160 --> 00:37:27.880 +This was when this is an ongoing issue +with a lot of countries now. + +00:37:28.360 --> 00:37:31.200 +So you see almost all publicly available +data sets, right... + +00:37:31.680 --> 00:37:35.600 +These are the data that are associated in +some or other form of copyright one. + +00:37:35.760 --> 00:37:36.200 +Right. + +00:37:36.400 --> 00:37:39.640 +And although there is no frame, +most of the part of what + +00:37:40.280 --> 00:37:43.360 +deals with the legality of AI generated +captions, I mean, + +00:37:43.360 --> 00:37:46.880 +there is no written law with any place +of what currently it might come later. + +00:37:47.200 --> 00:37:52.280 +Maybe in US first, is just so this is a complexity +of some other complexity. + +00:37:52.280 --> 00:37:55.640 +Also like owning of AI generated... +who would own that data, right? + +00:37:55.640 --> 00:37:59.000 +I mean, if it's a machine generated +data, who would be owning the + +00:37:59.040 --> 00:38:03.080 +the industry that has built that model +or the dataset that has been + +00:38:03.640 --> 00:38:05.200 +gathered from different data sources. + +00:38:05.200 --> 00:38:07.480 +Now, this is a very complex challenge. + +00:38:07.480 --> 00:38:10.480 +The other part of it is +how would you fix the responsibility? + +00:38:10.840 --> 00:38:13.600 +But with that in mind, +if it depends on the end user of the + +00:38:13.600 --> 00:38:14.840 +ML model, when you use that, + +00:38:15.840 --> 00:38:17.280 +in what context are you using? 
+ +00:38:17.280 --> 00:38:20.200 +I mean, well, for example, some +some of the models are used in + +00:38:20.400 --> 00:38:21.400 +Academy, right. + +00:38:21.400 --> 00:38:24.520 +I know these are just for research +and development purposes. + +00:38:24.760 --> 00:38:28.440 +There is no way where you can + +00:38:28.480 --> 00:38:31.640 +fix the responsibility +on an academy of an ML output. + +00:38:31.640 --> 00:38:32.080 +Right. + +00:38:32.160 --> 00:38:34.960 +So these are the this this helps in two ways + +00:38:35.520 --> 00:38:38.200 +like there +is how you're sourcing the data. + +00:38:38.520 --> 00:38:42.280 +Either you have to get the figures +on the data, where it is coming from. + +00:38:42.280 --> 00:38:46.320 +You, you, you gather +your data based on written sources. + +00:38:46.320 --> 00:38:49.360 +You have a mutual understanding +between the data generator + +00:38:49.360 --> 00:38:51.840 +creator and you, +and then you train on the data. + +00:38:51.960 --> 00:38:55.080 +But that gives you a complexity +where you have very small data + +00:38:55.280 --> 00:38:57.880 +and there is a large input going +and training your data. + +00:38:58.120 --> 00:39:01.640 +So yeah, these are the complexity +currently, but yeah, it's all depends on + +00:39:01.640 --> 00:39:04.560 +where the ML model +or the output is being used + +00:39:05.120 --> 00:39:07.160 +and that's +where the fair use policy comes. + +00:39:09.560 --> 00:39:13.040 +Context all the way in all scenarios, +right? + +00:39:14.160 --> 00:39:16.600 +Amy? Yeah, + +00:39:16.600 --> 00:39:21.200 +So I am not as familiar with, kind of like +the legal and copyright side of this, + +00:39:21.200 --> 00:39:26.080 +but I do think, you know, oftentimes +I do think about like the responsibility + +00:39:26.080 --> 00:39:28.440 +aspects of the captions +that we're generating, + +00:39:28.440 --> 00:39:31.960 +especially when we're doing these +kind of like new forms of it + +00:39:32.000 --> 00:39:34.840 +where we're generating things +for like user generated media. + +00:39:35.000 --> 00:39:37.120 +And I think this more goes back to the + +00:39:38.120 --> 00:39:41.200 +to potential harms +brought up in the keynote. + +00:39:41.480 --> 00:39:45.880 +So so for instance, like I guess one thing +I often am thinking about is like + +00:39:46.280 --> 00:39:50.920 +when are errors not that big of a deal +and when are they a bigger deal? + +00:39:50.920 --> 00:39:54.640 +And then, you know, kind of trade +looking at their risks and trade offs + +00:39:54.640 --> 00:39:59.000 +in terms of like who like who's +receiving the image and who's or who's + +00:39:59.000 --> 00:40:02.840 +getting identified by the the tool +and who is receiving the image. + +00:40:03.720 --> 00:40:08.800 +So, for instance, +if I misidentified my shirt as dark blue + +00:40:08.800 --> 00:40:12.600 +instead of black, this error is unlikely +to be as harmful to me, + +00:40:12.920 --> 00:40:15.120 +but for some people might experience + +00:40:15.640 --> 00:40:18.400 +misgendering them with image +classification to be harmful. + +00:40:18.600 --> 00:40:21.280 +And so I guess two ways +I've seen with dealing with this. + +00:40:22.120 --> 00:40:26.320 +You know, not to say that +either of them is good right now. + +00:40:26.640 --> 00:40:29.760 +So one is like +I think a lot of tools actually back off + +00:40:29.760 --> 00:40:32.640 +to saying person instead of woman or man. 
+ +00:40:33.280 --> 00:40:37.160 +And another way that you could +imagine doing it is also like describing + +00:40:37.480 --> 00:40:41.200 +physical characteristics of the person +that are less subjective. + +00:40:41.400 --> 00:40:46.120 +And a final way you might imagine doing +it is like take... is considering people's + +00:40:46.120 --> 00:40:49.480 +own identifications +of how they would like to be described, + +00:40:49.840 --> 00:40:51.680 +and sometimes that varies +in different contexts. + +00:40:51.680 --> 00:40:54.000 +So I think that's itself a hard problem. + +00:40:54.000 --> 00:40:56.920 +But yeah, I don't have much to say +on the legal or copyright side. + +00:40:56.920 --> 00:40:58.120 +I just wanted to bring up that. + +00:40:58.120 --> 00:41:00.440 +That's something +that's come up in my work before. Yeah. + +00:41:01.520 --> 00:41:02.080 +Okay. + +00:41:02.120 --> 00:41:03.440 +Thank you so much. + +00:41:03.440 --> 00:41:06.400 +I think we're almost at the end. + +00:41:06.400 --> 00:41:11.960 +We have less than 10 minutes, but +and questions keep coming, which is great. + +00:41:11.960 --> 00:41:16.360 +So you will have the opportunity, +I guess, to to try to answer somewhat, + +00:41:16.360 --> 00:41:20.560 +some of them offline if you if you wish +to, But I'll still take another one. + +00:41:20.720 --> 00:41:24.240 +The last one that we have +here from Antonio Gambabari, + +00:41:24.760 --> 00:41:27.320 +and I think it's + +00:41:27.320 --> 00:41:31.640 +that the question is how do you envision +the challenges of explainable A.I. + +00:41:31.640 --> 00:41:34.360 +initiatives +in the context of image recognition? + +00:41:34.360 --> 00:41:34.840 +Right. + +00:41:34.880 --> 00:41:38.400 +And I think this relates +to several of the aspects + +00:41:38.680 --> 00:41:42.320 +that we've dealt with, +with the uncertainty of images + +00:41:42.320 --> 00:41:48.120 +and how do we convey that to users +even just by labeling + +00:41:48.120 --> 00:41:52.600 +something as automatically generated +would be a way to convey that. + +00:41:52.960 --> 00:41:56.200 +But do you think that explainable A.I. + +00:41:56.200 --> 00:42:00.080 +initiatives +have the potential to improve this kind of + +00:42:02.520 --> 00:42:04.720 +augmented context for the user? + +00:42:04.720 --> 00:42:08.240 +And where did the description came from? + +00:42:08.680 --> 00:42:12.040 +And this time, I'll start with you Shivam. + +00:42:12.040 --> 00:42:15.400 +I think yes, and it is a good point. + +00:42:15.400 --> 00:42:18.760 +Explainable AI initiative deals with how + +00:42:19.960 --> 00:42:23.880 +metadata can help +the end user to know the context of + +00:42:23.880 --> 00:42:27.680 +what is being generated, any quantitative +score on any of the models. + +00:42:27.720 --> 00:42:33.040 +It is supported by a lot of data +that goes beyond your training data. + +00:42:33.880 --> 00:42:37.720 +There is a distinction, though, +that whatever things you are getting + +00:42:37.720 --> 00:42:41.200 +an output, right, the metadata can +there are multiple layers of training. + +00:42:41.200 --> 00:42:43.320 +If you look into training, +there are multiple layers of training. + +00:42:43.320 --> 00:42:46.960 +So how that decision has been made +by an AI, it can give you + +00:42:46.960 --> 00:42:49.720 +a certain level of metadata, but not all. + +00:42:50.080 --> 00:42:53.800 +So yeah, it can augment the user, +but that won't be the complete solution. 
+ +00:42:53.800 --> 00:42:57.880 +But that's how I see. + +00:42:57.880 --> 00:42:58.320 +Amy, + +00:42:59.600 --> 00:43:01.120 +any thoughts on this? + +00:43:01.120 --> 00:43:03.480 +Yeah, so that that's a good question. + +00:43:03.480 --> 00:43:05.840 +I don't, I don't know. + +00:43:05.840 --> 00:43:10.000 +So I think some things that I've, +I've seen + +00:43:11.040 --> 00:43:13.880 +so, so one thing I would think about +a little bit in this is in + +00:43:13.960 --> 00:43:16.520 +and I've had to think about +before is is sort of like + +00:43:17.040 --> 00:43:20.880 +the tradeoff between receiving information +efficiently + +00:43:20.880 --> 00:43:24.400 +and explaining where you got +all of that information from. + +00:43:25.120 --> 00:43:28.320 +And I think both are important +and I think maybe + +00:43:29.080 --> 00:43:31.600 +like I think +what my experience has been is that users + +00:43:31.600 --> 00:43:35.280 +are used to certain types of errors +and can recover from them quickly. + +00:43:35.400 --> 00:43:37.560 +So for instance, + +00:43:37.600 --> 00:43:40.600 +like when when a user's +reviewing their own content, for example, + +00:43:40.600 --> 00:43:45.400 +they took pictures or video and they hear +something described is a leash. + +00:43:45.400 --> 00:43:47.920 +I have had the experience of users +being like, Oh no, that's my cane. + +00:43:48.040 --> 00:43:50.120 +Like it always calls my cane a leash. So. + +00:43:50.120 --> 00:43:53.800 +So I think in some cases, +like people can get like can get used + +00:43:53.800 --> 00:43:58.080 +to identifying the errors for the, +for the like known unknowns. + +00:43:58.080 --> 00:44:00.520 +So this is just like a wrong +identification, I'm used to it. + +00:44:00.640 --> 00:44:04.160 +And I do think it's harder to recover +from areas that are like unknown unknowns. + +00:44:04.160 --> 00:44:07.240 +You don't have any other context about it, +so you're not sure what else it would be. + +00:44:07.360 --> 00:44:11.320 +And I think in maybe those those cases +where users haven't identified it before, + +00:44:11.840 --> 00:44:15.800 +that that confidence information +would be like extra important and so yeah, + +00:44:15.800 --> 00:44:17.920 +I'm not really sure what the answer is, +but I think that like + +00:44:18.040 --> 00:44:22.880 +considering the balance between +what is the what's important and to know + +00:44:22.880 --> 00:44:26.760 +more information about will will be like +a tricky design question as well as + +00:44:27.920 --> 00:44:31.160 +a question for how to develop technology. + +00:44:31.280 --> 00:44:31.880 +Okay, great. + +00:44:31.880 --> 00:44:32.440 +Thanks. + +00:44:32.440 --> 00:44:35.840 +And Michael, any any input on this one? + +00:44:36.520 --> 00:44:39.640 +So I would just add to all that that, +you know, + +00:44:39.640 --> 00:44:44.440 +this again, falls into the question of +of ethics, transparency + +00:44:44.440 --> 00:44:47.920 +and Explainability is one of the sections +of the machine learning Ethics + +00:44:48.560 --> 00:44:51.360 +is intended for several aspects of it. + +00:44:51.480 --> 00:44:54.080 +You should know how the machine learning +was built. + +00:44:54.080 --> 00:44:56.560 +It should be auditable for various issues. 
+
+00:44:56.800 --> 00:45:00.920
+These ethics are probably less specific
+to some of the use cases
+
+00:45:00.920 --> 00:45:04.920
+we're discussing in this symposium,
+so there might be room for adding
+
+00:45:05.000 --> 00:45:08.440
+to this section of the document.
+
+00:45:08.440 --> 00:45:09.400
+Yeah. Yeah.
+
+00:45:09.400 --> 00:45:11.840
+I think that might be a good idea.
+
+00:45:11.840 --> 00:45:15.040
+And I'll take just the final one
+
+00:45:16.360 --> 00:45:19.760
+and I'll go back to the topic
+and one from Matt.
+
+00:45:19.840 --> 00:45:23.120
+And because it's something
+that we have touched upon before,
+
+00:45:23.680 --> 00:45:26.440
+I'll start with you, Michael, here,
+because we,
+
+00:45:26.440 --> 00:45:30.400
+you all were mentioning this
+in the scope of ARIA.
+
+00:45:30.880 --> 00:45:36.640
+And so it's the question about having
+richer alternatives to the image
+
+00:45:36.640 --> 00:45:40.600
+description, to the standard alt text,
+which is usually short.
+
+00:45:41.080 --> 00:45:44.440
+And what are your thoughts
+on the usefulness
+
+00:45:44.440 --> 00:45:47.720
+of having richer descriptions
+
+00:45:48.200 --> 00:45:54.640
+for image alternatives?
+
+00:45:54.640 --> 00:45:56.000
+Oh. Let’s see,
+
+00:45:58.240 --> 00:45:58.960
+as far as the
+
+00:45:58.960 --> 00:46:01.640
+general idea in terms of the usefulness
+
+00:46:02.240 --> 00:46:08.680
+of making use of richer descriptions. So
+
+00:46:11.920 --> 00:46:12.880
+so for very simple
+
+00:46:12.880 --> 00:46:15.720
+images, you know, sort of the way the web
+
+00:46:16.320 --> 00:46:19.680
+started,
+where images were largely providing small
+
+00:46:19.680 --> 00:46:21.280
+functional roles,
+you know, the alt attribute
+
+00:46:21.280 --> 00:46:23.840
+was probably sufficient
+for many of their cases.
+
+00:46:23.840 --> 00:46:28.720
+Images are being used nowadays
+for a variety of purposes.
+
+00:46:29.680 --> 00:46:33.480
+You know, some of them are reducible
+to an alt like photo of my dog.
+
+00:46:33.480 --> 00:46:35.760
+But you know, that's not really providing
+the experience.
+
+00:46:35.760 --> 00:46:39.560
+So, you know, there's definitely
+
+00:46:40.920 --> 00:46:44.040
+a need for a richer alternative
+
+00:46:45.840 --> 00:46:51.120
+and longer alternatives,
+you know, ones that can have structures,
+
+00:46:51.120 --> 00:46:54.440
+you can skim them,
+you know, depending on the context, ones
+
+00:46:54.440 --> 00:46:57.880
+that you can provide links
+to the necessary bits of alternative data,
+
+00:46:58.440 --> 00:46:59.640
+which is...
+
+00:46:59.640 --> 00:47:01.840
+a question about images and charts.
+
+00:47:01.840 --> 00:47:06.280
+Often the description for a chart
+is much more structured semantically
+
+00:47:06.520 --> 00:47:09.000
+than one for other kinds of images,
+and that's where
+
+00:47:09.160 --> 00:47:12.640
+you really want to be able to
+take advantage of rich text markup. So
+
+00:47:13.920 --> 00:47:15.760
+I believe that,
+
+00:47:15.760 --> 00:47:18.840
+you know, assistive
+technologies are supporting,
+
+00:47:18.840 --> 00:47:22.360
+you know, rich text descriptions
+whenever they're available.
+
+00:47:23.360 --> 00:47:27.640
+So it's a question
+of getting people to use them more.
+
+00:47:27.640 --> 00:47:31.840
+And of course, for machine learning,
+generally, we would rather have them
+
+00:47:31.840 --> 00:47:36.400
+do richer rather than less rich output.
+
+00:47:36.400 --> 00:47:37.240
+Okay. Yeah.
+ +00:47:37.240 --> 00:47:45.120 +And following up on that for Shivam +and for Amy, by having richer... + +00:47:45.280 --> 00:47:48.160 +richer and longer descriptions, + +00:47:48.160 --> 00:47:52.480 +are we increasing the, +the the the chances that + +00:47:52.920 --> 00:47:56.320 +AI generated descriptions will mess up + +00:47:56.800 --> 00:48:00.040 +or isn't that the risk + +00:48:00.040 --> 00:48:02.520 +Who wants to start? + +00:48:02.520 --> 00:48:06.880 +Amy? Sure I think we're definitely +yeah I agree + +00:48:06.880 --> 00:48:10.360 +that like oftentimes the more details + +00:48:10.360 --> 00:48:13.840 +that you get, the more + +00:48:13.840 --> 00:48:16.080 +the more opportunities +there are for errors. + +00:48:16.400 --> 00:48:19.080 +I think one way that +we've kind of explored this + +00:48:19.080 --> 00:48:23.120 +a little bit is seeing +if we can bring for for + +00:48:23.600 --> 00:48:27.440 +like very informative images +that maybe a lot of people will see. + +00:48:27.840 --> 00:48:30.120 +We've thought +about how we could maybe combine + +00:48:31.320 --> 00:48:32.280 +automated tools + +00:48:32.280 --> 00:48:35.480 +with with like human written descriptions + +00:48:35.480 --> 00:48:38.520 +to hopefully make +some of the descriptions better. + +00:48:38.520 --> 00:48:42.480 +So maybe automated tools could help you, +like help automatically extract + +00:48:42.480 --> 00:48:46.480 +the structure of the image, +and then humans could go in to write + +00:48:47.200 --> 00:48:50.480 +more detail about the parts of the images +that are really unlikely + +00:48:50.480 --> 00:48:54.520 +to be fully +like fully described by the computer. + +00:48:54.520 --> 00:48:57.600 +So so I think for now, the way + +00:48:57.600 --> 00:49:00.880 +I've been thinking about those +more complex images is often in like, + +00:49:00.880 --> 00:49:04.040 +how are we going to help +humans create descriptions + +00:49:04.960 --> 00:49:07.240 +more efficiently +by while still maintaining really + +00:49:07.240 --> 00:49:10.600 +high quality rather than thinking about +how to do it fully automatically? + +00:49:10.600 --> 00:49:13.800 +Just based on the images +I've looked at in the past year. + +00:49:15.240 --> 00:49:18.120 +OK, thanks and Shivam any input? + +00:49:18.840 --> 00:49:24.160 +Yeah I think the inspiration behind +the question would be to give a structure + +00:49:24.160 --> 00:49:29.800 +to the output of any of the old images +like so it can be a structure output + +00:49:29.960 --> 00:49:33.400 +make more sense than to we have a fallback +estimate right so you're + +00:49:35.040 --> 00:49:35.680 +you can + +00:49:35.680 --> 00:49:40.800 +provide more information to an output +but the output would rest + +00:49:40.840 --> 00:49:43.600 +should remain actually shorter +and more explainable. + +00:49:43.920 --> 00:49:47.200 +It may be grammatically more correct that would +make more sense to the end user. + +00:49:47.520 --> 00:49:50.280 +And he might have one other option +to explain that. + +00:49:50.520 --> 00:49:54.600 +It's not like you have a string generated +out of an image, right? + +00:49:55.600 --> 00:49:57.320 +When you read out to a screen, right + +00:49:57.320 --> 00:50:00.880 +your screen reader, +it should concisely read it shot briefly. + +00:50:00.880 --> 00:50:04.680 +And for more description, +there should be some other excellent + +00:50:04.680 --> 00:50:05.840 +data can be supplied to it. 
+ +00:50:05.840 --> 00:50:08.440 +And then there are multiple ways +we can do this. + +00:50:08.800 --> 00:50:14.080 +But the description of an ultimate should +remain concise and grammatically correct. + +00:50:14.200 --> 00:50:16.320 +So that screen readers can try to read it, + +00:50:16.320 --> 00:50:19.080 +but that's how I see it. + +00:50:19.400 --> 00:50:20.920 +Okay. Thank you so much. + +00:50:20.920 --> 00:50:26.200 +And I want to thank the three of you +once more for agreeing to take part + +00:50:26.200 --> 00:50:30.000 +in this panel, also for agreeing +to take part in the next panel. + +00:50:30.480 --> 00:50:35.840 +So as we can see, media accessibility, +it's really a rich topic and + +00:50:36.480 --> 00:50:38.920 +definitely computer generated descriptions + +00:50:39.280 --> 00:50:42.560 +are also linked +with natural language processing. + +00:50:42.560 --> 00:50:45.440 +So what that will be the topic +for the next panel + +00:50:46.040 --> 00:50:48.520 +in just under 10 minutes. + +00:50:48.520 --> 00:50:53.360 +So we'll have a coffee break now +and I hope everyone's enjoying + +00:50:53.360 --> 00:51:00.840 +and we'll be back at ten past the hour. + diff --git a/pages/about/projects/wai-coop/symposium2-captions/do_no_harm.vtt b/pages/about/projects/wai-coop/symposium2-captions/do_no_harm.vtt new file mode 100644 index 00000000000..d9684654c76 --- /dev/null +++ b/pages/about/projects/wai-coop/symposium2-captions/do_no_harm.vtt @@ -0,0 +1,3464 @@ +WEBVTT +Kind: captions +Language: en + +00:00:00.080 --> 00:00:03.200 +And now let's move to the opening keynote, + +00:00:03.640 --> 00:00:08.600 +for which we are delighted +to welcome Jutta Treviranus + +00:00:08.680 --> 00:00:12.200 +Jutta is the director +of the Inclusive Design Research Center + +00:00:12.200 --> 00:00:17.880 +and a professor in the Faculty of Design +at the OCAD University in Toronto. + +00:00:18.280 --> 00:00:20.640 +So, Jutta, the floor is yours. + +00:00:21.760 --> 00:00:22.880 +Thank you. + +00:00:22.880 --> 00:00:26.960 +And if you stop sharing, +then I can share my slides. + +00:00:27.920 --> 00:00:29.360 +Thank you, Carlos. + +00:00:29.360 --> 00:00:35.720 +And it's a great pleasure to be able +to talk to you about this important topic. + +00:00:35.720 --> 00:00:38.120 +I am going to + +00:00:38.560 --> 00:00:41.000 +just start my slides + +00:00:43.080 --> 00:00:47.280 +and I'm hoping that +what you see is just the + +00:00:47.440 --> 00:00:50.960 +the primary slide, correct? + +00:00:50.960 --> 00:00:51.880 +Correct. + +00:00:51.880 --> 00:00:54.680 +Oh, wonderful. Okay. + +00:00:54.680 --> 00:00:56.120 +And thank you, everyone. + +00:00:56.120 --> 00:00:59.440 +I will voice my slides +and the information in the images. + +00:00:59.960 --> 00:01:03.960 +And I've titled my talk first, Do no harm. + +00:01:04.760 --> 00:01:07.880 +I'm usually a really optimistic person, + +00:01:07.880 --> 00:01:11.680 +and I'm hoping to provide +an optimistic message. + +00:01:11.680 --> 00:01:16.520 +But to realize the benefits of AI +I believe we need to first recognize + +00:01:16.880 --> 00:01:18.920 +and take into account the harms. + +00:01:19.520 --> 00:01:21.080 +I'm going to limit my discussion + +00:01:21.080 --> 00:01:24.120 +to the harms that are specific to people +with disabilities. 
+
+00:01:25.040 --> 00:01:30.040
+There's a great deal of work detailing the
+ethical concerns of currently deployed
+
+00:01:30.280 --> 00:01:35.120
+AI from lack of representation
+to human bigotry,
+
+00:01:35.480 --> 00:01:39.560
+finding its way into algorithms
+to manipulative practices,
+
+00:01:39.840 --> 00:01:44.640
+unfair value extraction and exploitation
+and disinformation.
+
+00:01:45.000 --> 00:01:47.920
+I'll focus on accessibility
+and disability,
+
+00:01:48.320 --> 00:01:50.600
+including the recognition that disability
+
+00:01:51.320 --> 00:01:54.760
+is at the margins of all other justice
+deserving groups
+
+00:01:54.760 --> 00:01:58.880
+and therefore most vulnerable
+to the general and emerging harms,
+
+00:01:58.880 --> 00:02:01.880
+but also the potential
+opportunities of AI.
+
+00:02:02.960 --> 00:02:06.200
+Carlos shared a number of questions
+that were submitted
+
+00:02:06.200 --> 00:02:10.600
+by those of you attending today,
+and they are great questions.
+
+00:02:10.760 --> 00:02:13.880
+Shari and I have agreed
+that these will be better
+
+00:02:13.880 --> 00:02:16.720
+covered through a conversation
+than a presentation.
+
+00:02:17.160 --> 00:02:21.040
+So at the end of my talk,
+
+00:02:21.720 --> 00:02:24.680
+I'm going to invite Shari to talk
+
+00:02:25.200 --> 00:02:29.720
+about those particular questions,
+and we'll do the same at the book
+
+00:02:29.720 --> 00:02:34.280
+ending talk
+that Shari is giving tomorrow.
+
+00:02:37.400 --> 00:02:41.080
+So our society is plagued
+
+00:02:41.080 --> 00:02:43.720
+by more and more difficulties
+
+00:02:45.240 --> 00:02:48.000
+as the world becomes more and more complex
+
+00:02:48.000 --> 00:02:51.320
+and entangled, the choices
+increase in ambiguity,
+
+00:02:51.320 --> 00:02:55.520
+the risks associated with each decision
+become more consequential.
+
+00:02:55.960 --> 00:02:58.760
+The factors to consider in each decision
+
+00:02:58.760 --> 00:03:04.640
+more numerous, convoluted and confusing,
+and especially in times of crisis
+
+00:03:04.640 --> 00:03:07.400
+like we've been experiencing
+these last few years
+
+00:03:07.720 --> 00:03:11.800
+and in highly competitive situations
+where there is scarcity,
+
+00:03:12.080 --> 00:03:15.920
+AI decision tools become more
+and more attractive and useful.
+
+00:03:17.640 --> 00:03:20.480
+As an illustrative example,
+
+00:03:20.480 --> 00:03:24.400
+it is no wonder that over
+90% of organizations
+
+00:03:24.760 --> 00:03:28.560
+use some form of AI hiring tool,
+according to the U.S.
+
+00:03:28.880 --> 00:03:31.600
+Equal Employment Opportunity Commission.
+
+00:03:32.560 --> 00:03:35.440
+As work becomes less formulaic
+
+00:03:35.440 --> 00:03:38.000
+and finding the right fit
+becomes more difficult,
+
+00:03:38.600 --> 00:03:42.480
+they are a highly seductive tool.
+As an employer
+
+00:03:42.960 --> 00:03:46.400
+when choosing who to hire
+from a huge pool of applicants,
+
+00:03:46.400 --> 00:03:48.520
+what better way to sift through
+
+00:03:48.520 --> 00:03:52.520
+and find the gems
+and eliminate the potential failed choices
+
+00:03:52.520 --> 00:03:57.320
+than to use an AI system?
+With an AI tool making the decisions,
+
+00:03:57.320 --> 00:04:01.560
+we remove the risks of conflicts
+of interest and nepotism.
+ +00:04:02.120 --> 00:04:06.480 +What better way to determine who will be +a successful candidate than to use + +00:04:06.480 --> 00:04:10.520 +all the evidence we've gathered +from our current successful employees, + +00:04:11.240 --> 00:04:15.400 +especially when the jobs we're trying to +fill are not formulaic. + +00:04:15.680 --> 00:04:18.840 +When there isn't a valid test +we can devise for candidates + +00:04:19.600 --> 00:04:21.560 +to determine their suitability. + +00:04:21.560 --> 00:04:25.920 +AI can use predictive analytics +to find the optimal candidates. + +00:04:26.840 --> 00:04:31.360 +In this way, we're applying solid, +rigorous science and what would be + +00:04:31.360 --> 00:04:34.400 +an unscientific decision otherwise, + +00:04:34.400 --> 00:04:37.520 +we're not relying +on fallible human intuition + +00:04:39.080 --> 00:04:39.720 +tools are + +00:04:39.720 --> 00:04:42.880 +even adding information +beyond the application + +00:04:42.880 --> 00:04:46.520 +to rule out falsehoods or exaggerations +in the applications. + +00:04:46.920 --> 00:04:48.800 +After all, you never know. + +00:04:48.800 --> 00:04:51.280 +There are so many ways to fake + +00:04:51.800 --> 00:04:55.400 +a work history, +a cover letter, or to cheat in academia. + +00:04:56.280 --> 00:05:00.640 +The AI hiring tools can verify through +gleaned social media data + +00:05:00.640 --> 00:05:05.200 +and information available on the web +or through networked employment data. + +00:05:05.440 --> 00:05:06.800 +After all, employees + +00:05:06.800 --> 00:05:10.720 +have agreed to share this +as part of the conditions of employment + +00:05:11.040 --> 00:05:15.200 +and other employers have agreed +as the conditions of using the tool. + +00:05:15.640 --> 00:05:18.800 +If that is not enough, AI administered +and processed + +00:05:18.800 --> 00:05:20.880 +assessments can be integrated + +00:05:22.400 --> 00:05:25.040 +and the tools are going +beyond the practical + +00:05:25.040 --> 00:05:29.120 +and qualitatively determinable capacity +of candidates + +00:05:29.480 --> 00:05:33.600 +to finding the best fit +culturally to make sure that the chosen + +00:05:33.600 --> 00:05:37.400 +candidates don't cause friction +but integrate comfortably. + +00:05:37.880 --> 00:05:41.920 +The tools will even analyze data +from interviews to rate the socio + +00:05:42.120 --> 00:05:44.480 +emotional fit of candidates. + +00:05:44.920 --> 00:05:48.800 +If that's not satisfactory, +an employer can tweak the system + +00:05:49.160 --> 00:05:53.200 +to add additional factors +such as their favored university + +00:05:53.520 --> 00:05:55.640 +or to create an ideal persona. + +00:05:56.000 --> 00:05:58.880 +Pick an ideal employee +as the model and the systems + +00:05:58.880 --> 00:06:02.600 +are becoming better +and more sophisticated in finding a match. + +00:06:03.240 --> 00:06:06.720 +The same system can then guide promotion +and termination, + +00:06:07.080 --> 00:06:10.400 +ensuring +consistency of employment policies. + +00:06:11.280 --> 00:06:13.840 +So what's wrong with this? Science. + +00:06:13.880 --> 00:06:15.920 +Math. Statistical reasoning. + +00:06:16.160 --> 00:06:18.840 +Efficiency. Accuracy. Consistency. + +00:06:19.320 --> 00:06:22.080 +Better +and more accurate screening for the best + +00:06:22.080 --> 00:06:25.720 +fit of the scientifically determined +optimal employ + +00:06:25.720 --> 00:06:29.480 +accurate replication +and scaling of a winning formula. 
+ +00:06:30.160 --> 00:06:32.600 +It's a very seductive opportunity. + +00:06:33.680 --> 00:06:36.320 +What could be wrong + +00:06:36.720 --> 00:06:38.640 +for the employing organization? + +00:06:38.640 --> 00:06:43.640 +We arrive at a comfortable monoculture +that recreates and intensifies + +00:06:44.040 --> 00:06:46.200 +the successful patterns of the past. + +00:06:46.720 --> 00:06:50.440 +With more data and more powerful analysis. + +00:06:50.480 --> 00:06:53.960 +The intended target becomes more +and more precise. + +00:06:54.560 --> 00:06:58.040 +The employer finds more and more perfect +fits. + +00:06:58.440 --> 00:07:00.600 +What is wrong with that? + +00:07:00.680 --> 00:07:03.920 +For the organization, +what's wrong is what happens + +00:07:04.080 --> 00:07:08.000 +when the context changes, +when the unexpected happens. + +00:07:08.400 --> 00:07:11.840 +A monoculture doesn't offer +much adaptation, + +00:07:11.880 --> 00:07:14.720 +flexibility, or alternative choices. + +00:07:14.920 --> 00:07:16.320 +That's a visual description. + +00:07:16.320 --> 00:07:19.680 +I have an image showing +what happened to clone potatoes + +00:07:19.680 --> 00:07:25.000 +during a blight +that was survived by a diverse crop. + +00:07:25.600 --> 00:07:28.520 +Of course, we have diversity, +equity and inclusion + +00:07:28.520 --> 00:07:32.680 +measures to compensate for discriminatory +hiring and increase + +00:07:32.680 --> 00:07:36.520 +the number of employees +from protected underrepresented groups. + +00:07:37.400 --> 00:07:38.360 +But even there, + +00:07:39.320 --> 00:07:40.480 +there will be an + +00:07:40.480 --> 00:07:44.040 +even greater rift +between the monoculture and the candidates + +00:07:44.040 --> 00:07:48.680 +hired through diversity +and equity programs. + +00:07:48.800 --> 00:07:53.120 +What happens to the candidate +with a disability who would + +00:07:53.120 --> 00:07:57.080 +otherwise be a great fit +for doing the job, + +00:07:57.560 --> 00:08:00.040 +when judged by these hiring systems? + +00:08:01.840 --> 00:08:04.880 +When AI is analyzing, +sorting, filtering data + +00:08:05.360 --> 00:08:08.840 +about a large group of people, +what does disability look like? + +00:08:09.200 --> 00:08:12.280 +Where is disability in a complex +and tangled + +00:08:12.280 --> 00:08:14.400 +adaptive multivariate data set? + +00:08:15.360 --> 00:08:17.560 +Self-identification is often + +00:08:17.560 --> 00:08:20.840 +disallowed +and many people don't self-identify. + +00:08:21.200 --> 00:08:24.800 +Even if we had a way to identify +the definition + +00:08:24.800 --> 00:08:27.920 +and boundaries of disability +are highly contested. + +00:08:28.280 --> 00:08:32.120 +Disability statisticians are acutely aware +of some of the challenges. + +00:08:33.240 --> 00:08:37.280 +In any normal distribution, +someone with a disability is an outlier. + +00:08:37.360 --> 00:08:41.200 +The only common data +characteristic of disability is difference + +00:08:41.200 --> 00:08:43.080 +from the average or norm. + +00:08:43.080 --> 00:08:45.440 +People with disabilities +are also more diverse + +00:08:45.440 --> 00:08:47.600 +from each other than people +without disabilities. + +00:08:48.480 --> 00:08:52.520 +Data points in the middle are close +together, meaning they are more alike. + +00:08:52.560 --> 00:08:54.320 +Data points at the periphery + +00:08:54.320 --> 00:08:57.560 +are further apart, meaning +they are more different from each other. 
+ +00:08:57.960 --> 00:09:01.080 +Data regarding people +living with disabilities are spread + +00:09:01.080 --> 00:09:05.520 +the furthest in what +I call the starburst of human needs. + +00:09:07.280 --> 00:09:09.320 +And as a result of this pattern, + +00:09:09.320 --> 00:09:12.800 +any statistically determined +prediction is highly accurate + +00:09:13.200 --> 00:09:17.000 +for people that cluster in the middle, +inaccurate, as you move from the middle + +00:09:17.280 --> 00:09:20.400 +and wrong, +as you get to the edge of a data plot + +00:09:24.160 --> 00:09:24.680 +here, + +00:09:24.680 --> 00:09:29.200 +I'm not talking about AI's ability +to recognize and translate things + +00:09:29.200 --> 00:09:33.520 +that are average or typical, +like typical speech or text or from one + +00:09:33.520 --> 00:09:38.040 +typical language to another or to label +typical objects in the environment + +00:09:38.360 --> 00:09:42.680 +or to find the path that most people +are taking from one place to another. + +00:09:43.200 --> 00:09:46.560 +But even there, in these miraculous tools +that we're using, + +00:09:46.560 --> 00:09:47.960 +if we have a disability, + +00:09:47.960 --> 00:09:52.240 +if your speech is not average or +the environment you're in is not typical, + +00:09:52.520 --> 00:09:55.880 +AI also fails. + +00:09:56.120 --> 00:09:59.760 +Disability is the Achilles heel of AI + +00:09:59.760 --> 00:10:03.160 +applying statistical +reasoning and disability. + +00:10:04.160 --> 00:10:06.240 +You have the culmination of + +00:10:06.320 --> 00:10:11.760 +diversity variability, +the unexpected complexity and entanglement + +00:10:12.080 --> 00:10:15.600 +and the exception +to every rule or determination. + +00:10:17.320 --> 00:10:18.880 +AI systems are used to find + +00:10:18.880 --> 00:10:21.600 +applicants that match predetermined optima + +00:10:21.880 --> 00:10:25.560 +using large data +sets of successful employees and hires. + +00:10:26.360 --> 00:10:30.280 +The system is optimizing +the successful patterns of the past. + +00:10:30.320 --> 00:10:32.520 +All data is from the past. + +00:10:32.520 --> 00:10:35.360 +The analytical power tool is honing in on + +00:10:35.360 --> 00:10:38.000 +and polishing +the factors that worked before, + +00:10:38.360 --> 00:10:41.200 +and we know how much hiring there is + +00:10:41.200 --> 00:10:43.280 +of people with disabilities in the past. + +00:10:44.480 --> 00:10:47.720 +The tool is built to be biased +against difference. + +00:10:47.760 --> 00:10:49.760 +Disability is difference. + +00:10:49.760 --> 00:10:51.440 +Different ways of doing the job. + +00:10:51.440 --> 00:10:55.320 +Different digital traces, +different work and education history, + +00:10:55.320 --> 00:11:00.920 +different social media topics, +and a tangled profile of many differences + +00:11:02.360 --> 00:11:06.200 +as AI gets better +or more accurate + +00:11:06.200 --> 00:11:10.920 +in its identification of the optima, +AI gets more discriminatory + +00:11:10.920 --> 00:11:15.960 +and better at eliminating applicants +that don't match the optima in some way. + +00:11:17.360 --> 00:11:20.760 +The assumptions +these air power tools are built + +00:11:20.760 --> 00:11:24.080 +upon are that scaling and replicating + +00:11:24.080 --> 00:11:26.840 +past success +will bring about future success. 
+ +00:11:27.160 --> 00:11:31.760 +Optimizing data characteristics +associated with past successes + +00:11:32.000 --> 00:11:36.200 +increases future successes +and the data characteristics + +00:11:36.480 --> 00:11:40.200 +that determine success +need not be specified or known + +00:11:40.520 --> 00:11:45.440 +to the operators of the AI or the people +who are subject to the decisions. + +00:11:45.760 --> 00:11:49.480 +And the AI cannot articulate at the moment + +00:11:49.480 --> 00:11:54.280 +the highly diffuse and possibly +adaptive reasons behind the choices. + +00:11:54.600 --> 00:11:59.280 +Current AI systems cannot really explain +themselves or their choices. + +00:11:59.560 --> 00:12:06.920 +Despite the emergence of explainable AI, +how many of you have experienced tools + +00:12:06.920 --> 00:12:12.800 +like Microsoft Viva or many other +similar tools that purport to help you + +00:12:12.800 --> 00:12:16.840 +with be more efficient and productive +by analyzing your work habits? + +00:12:17.240 --> 00:12:21.760 +These surveillance systems provide more +and more granular data about employment, + +00:12:21.760 --> 00:12:26.360 +providing intelligence about the details +of the average optimal employee. + +00:12:27.440 --> 00:12:29.560 +The results of this AI design + +00:12:29.920 --> 00:12:34.160 +is that the optima will not be a person +with a disability. + +00:12:35.040 --> 00:12:38.680 +There are not enough successfully +employed persons with disability, + +00:12:39.800 --> 00:12:42.040 +but it is more than data gaps. + +00:12:42.080 --> 00:12:46.640 +Even if we have full representation +of data from persons with disabilities, + +00:12:47.000 --> 00:12:50.440 +there will not be enough +consistent data regarding success + +00:12:50.440 --> 00:12:53.000 +to reach probability thresholds. + +00:12:53.000 --> 00:12:57.040 +Even if all data gaps are filled, +each pattern will still be an outlier + +00:12:57.040 --> 00:13:01.400 +or a minority and will lack +probabilistic power in the algorithm. + +00:13:03.080 --> 00:13:05.040 +The same pattern is happening + +00:13:05.040 --> 00:13:08.200 +in all life altering difficult decisions. + +00:13:09.000 --> 00:13:12.600 +AI is being applied +and offered to competitive academic + +00:13:12.600 --> 00:13:15.840 +admissions departments +so you won't get admitted + +00:13:16.480 --> 00:13:20.320 +to beleaguered health providers +in the form of medical calculators + +00:13:20.320 --> 00:13:23.320 +and emergency triage tools resulting + +00:13:23.440 --> 00:13:26.000 +in more iatrogenic death and illness. + +00:13:26.320 --> 00:13:30.080 +If you're different +from your classification to policing + +00:13:30.080 --> 00:13:34.000 +to parole boards to immigration +and refugee adjudications + +00:13:35.360 --> 00:13:36.000 +to tax + +00:13:36.000 --> 00:13:40.360 +auditors, meaning more taxpayers +with disabilities are flagged to loans + +00:13:40.360 --> 00:13:43.800 +and mortgage officers, +meaning people with unusual asset + +00:13:43.800 --> 00:13:47.920 +patterns won't get credit +to security departments, meaning outliers + +00:13:47.920 --> 00:13:52.000 +become collateral damage. + +00:13:52.320 --> 00:13:57.160 +At a community level, we have evidence +based investment by governments, + +00:13:57.560 --> 00:14:02.400 +AI guiding political platforms, +public health decisions, urban planning, + +00:14:02.720 --> 00:14:06.080 +emergency +preparedness and security programs. + +00:14:06.560 --> 00:14:08.040 +None will decide. 
+ +00:14:08.040 --> 00:14:11.800 +With the marginalized outlier, +the outliers will be marked + +00:14:12.040 --> 00:14:15.080 +as security risks. + +00:14:15.360 --> 00:14:18.440 +These are monumental life +changing decisions. + +00:14:18.440 --> 00:14:20.560 +But even the smaller, seemingly + +00:14:20.560 --> 00:14:24.600 +inconsequential decisions +can harm by a million cuts. + +00:14:25.240 --> 00:14:27.000 +What gets covered by the news? + +00:14:27.000 --> 00:14:29.120 +What products make it to the market? + +00:14:29.480 --> 00:14:32.840 +The recommended route +be provided by the GPS. + +00:14:33.480 --> 00:14:36.360 +The priority given to supply chain +processes. + +00:14:36.360 --> 00:14:40.600 +What design features +make it to the market? + +00:14:40.760 --> 00:14:45.640 +Statistical reasoning that is inherently +biased against difference from the average + +00:14:45.920 --> 00:14:51.440 +is not only used to apply the metrics, +but to determine the optimal metrics. + +00:14:52.800 --> 00:14:56.000 +And this harm predates AI, statistical + +00:14:56.000 --> 00:14:59.240 +reasoning as the means of making decisions +does harm. + +00:14:59.480 --> 00:15:03.320 +It does harm to anyone +not like the statistical average + +00:15:03.640 --> 00:15:06.200 +or the statistically determined optima. + +00:15:07.440 --> 00:15:11.480 +Assuming that that we know about the +what we know about + +00:15:11.480 --> 00:15:14.480 +the majority applies to the minority, +it does harm + +00:15:15.560 --> 00:15:20.320 +equating truth and valid evidence +with singular statistically determined + +00:15:20.480 --> 00:15:23.480 +findings or majority truth does harm + +00:15:25.680 --> 00:15:28.680 +and AI amplifies, + +00:15:28.680 --> 00:15:31.160 +accelerates and automates this harm + +00:15:32.040 --> 00:15:34.600 +and it is used to exonerate us + +00:15:34.600 --> 00:15:38.800 +of responsibility for this harm. + +00:15:39.520 --> 00:15:42.680 +We've even heard a great deal +about the concern for privacy. + +00:15:43.280 --> 00:15:47.600 +Well, people with disability are most +vulnerable to data abuse and misuse. + +00:15:48.080 --> 00:15:50.400 +De-identification doesn't work. + +00:15:50.520 --> 00:15:53.520 +If you are highly unique, +you will be re-identified. + +00:15:54.080 --> 00:15:57.800 +Differential privacy +will remove the helpful data specifics + +00:15:57.800 --> 00:16:02.920 +that you need to take to make the AI work +for you and your unique needs. + +00:16:03.160 --> 00:16:06.000 +Most people with disabilities +are actually forced + +00:16:06.000 --> 00:16:08.920 +to barter +their privacy for essential services. + +00:16:09.680 --> 00:16:11.720 +We need to go beyond privacy. + +00:16:11.720 --> 00:16:16.760 +Assume there will be breaches and create +systems to prevent data abuse and misuse. + +00:16:17.240 --> 00:16:21.120 +We need to ensure transparency regarding +how data is used, by whom + +00:16:21.400 --> 00:16:22.880 +and for what purpose. + +00:16:22.880 --> 00:16:25.360 +And it's wonderful that the EU is + +00:16:27.440 --> 00:16:30.560 +organizing this talk because the EU +is doing + +00:16:30.560 --> 00:16:34.600 +some wonderful measures in this regard. + +00:16:36.200 --> 00:16:39.080 +But wait, + +00:16:39.080 --> 00:16:41.120 +we're talking about a great number + +00:16:41.120 --> 00:16:43.320 +of harms, haven't we + +00:16:44.160 --> 00:16:47.440 +developed some approaches, +some solutions to this? 
+ +00:16:47.960 --> 00:16:52.760 +Don't we have auditing tools that detect +and eliminate bias and discrimination of + +00:16:53.000 --> 00:16:56.520 +AI and don't +we have some systems that certify + +00:16:56.720 --> 00:16:59.200 +whether an AI is ethical or not? + +00:17:00.080 --> 00:17:03.600 +Can't we test tools for unwanted bias? + +00:17:03.720 --> 00:17:06.760 +Unfortunately, +AI auditing tools are misleading + +00:17:06.760 --> 00:17:10.400 +in that they don't detect bias +against outliers and small minorities + +00:17:10.400 --> 00:17:13.360 +or anyone who doesn't fit +the bounded groupings. + +00:17:13.760 --> 00:17:18.200 +Most AI ethics auditing systems +use cluster analysis, + +00:17:18.200 --> 00:17:21.920 +comparing the performance +regarding a bounded justice + +00:17:21.920 --> 00:17:25.640 +deserving group with the performance +for the general population. + +00:17:26.080 --> 00:17:29.600 +There is no bounded cluster +for disability. + +00:17:30.080 --> 00:17:33.360 +Disability +means a diffuse and highly diverse + +00:17:33.360 --> 00:17:36.400 +set of differences. + +00:17:36.680 --> 00:17:41.080 +Those AI ethics certification systems +and the industry + +00:17:41.080 --> 00:17:46.000 +that is growing around them +raise the expectation of ethical conduct + +00:17:46.840 --> 00:17:50.200 +that the problem has been fixed, +making it even more difficult + +00:17:50.200 --> 00:17:54.400 +for the individual to assert and address +harm. + +00:17:54.840 --> 00:17:59.120 +Many of them fall prey to Cobra effects +or the unintended consequences + +00:17:59.120 --> 00:18:03.560 +of oversimplistic solutions +to complex problems or linear thinking. + +00:18:04.240 --> 00:18:07.880 +Falling into the rut of mono-causality +when the causes are + +00:18:07.880 --> 00:18:09.880 +very complex and entangled, + +00:18:11.760 --> 00:18:12.320 +there's + +00:18:12.440 --> 00:18:16.160 +some helpful progress in +regulatory guidance. + +00:18:16.200 --> 00:18:18.880 +One example is the U.S. + +00:18:18.880 --> 00:18:23.000 +Equal Employment Opportunity Commission, +which has developed the Americans + +00:18:23.000 --> 00:18:26.200 +with disabilities Act +and the use of software, algorithms + +00:18:26.200 --> 00:18:30.280 +and artificial intelligence +to assess job applicants and employees. + +00:18:30.320 --> 00:18:32.000 +A very long title. + +00:18:32.000 --> 00:18:37.000 +But much of the guidance focuses on fair +assessments or tests and accommodation, + +00:18:37.400 --> 00:18:40.880 +not on the filtering out of applicants +before they are invited + +00:18:40.880 --> 00:18:45.880 +to take an assessment or by employers +who don't use assessments. + +00:18:46.440 --> 00:18:50.680 +The data related suggestion +is to remove the disability related data + +00:18:51.160 --> 00:18:54.680 +that is the basis of disability +discrimination. + +00:18:55.240 --> 00:18:58.520 +But what we found +is that the data cannot be isolated. + +00:18:58.520 --> 00:19:03.080 +For example, an interrupted work +history will have other data effects + +00:19:03.080 --> 00:19:08.000 +and markers, making it hard to match the +optimal pattern even when that is removed + +00:19:11.000 --> 00:19:13.480 +For the ethical harms there are + +00:19:13.760 --> 00:19:18.320 +that are common to a whole group +of marginalized individuals. + +00:19:18.800 --> 00:19:22.280 +There are numerous AI +ethics efforts emerging globally. 
+
+00:19:22.560 --> 00:19:27.320
+We've tried to capture the disability
+relevant ones in the We Count project.
+
+00:19:27.320 --> 00:19:32.040
+These include standards bodies,
+which are creating
+
+00:19:32.040 --> 00:19:34.880
+a number of standards
+that act as guidance.
+
+00:19:35.240 --> 00:19:39.720
+Government initiatives
+that are looking at the impact
+
+00:19:39.720 --> 00:19:42.600
+of their decisions
+using automated decision tools.
+
+00:19:42.920 --> 00:19:46.320
+Academic research units
+that are looking at the effects
+
+00:19:46.320 --> 00:19:50.000
+and possible approaches
+and think tanks and not for profits.
+
+00:19:50.600 --> 00:19:54.280
+One of the things that we found, though,
+is that disability is often
+
+00:19:54.280 --> 00:20:01.760
+left out of the considerations
+or the ethics approaches.
+
+00:20:01.760 --> 00:20:07.040
+And, as the questions
+that were submitted indicate,
+
+00:20:07.480 --> 00:20:09.640
+we're at an inflection point.
+
+00:20:09.640 --> 00:20:13.280
+And this current inflection point
+we’re at reminds me
+
+00:20:13.280 --> 00:20:17.160
+of Burke and Ornstein’s
+book, The Axemaker’s Gift.
+
+00:20:18.360 --> 00:20:19.520
+They wanted us to be
+
+00:20:19.520 --> 00:20:22.160
+aware of the Axemaker's gifts.
+
+00:20:23.480 --> 00:20:26.600
+Each time
+the Axemaker offered a new way
+
+00:20:26.720 --> 00:20:29.840
+to cut and control
+the world to make us rich
+
+00:20:30.920 --> 00:20:34.240
+or safe or invincible
+or more knowledgeable,
+
+00:20:34.240 --> 00:20:38.040
+we accepted the gift and used it,
+and we changed the world.
+
+00:20:38.040 --> 00:20:43.960
+We changed our minds for each gift,
+redefined the way we thought, the values
+
+00:20:43.960 --> 00:20:47.000
+by which we lived, and the truths
+
+00:20:47.000 --> 00:20:49.640
+for which we died.
+
+00:20:54.080 --> 00:20:55.000
+But to regain
+
+00:20:55.000 --> 00:20:58.040
+my optimism, even AI's potential
+
+00:20:58.040 --> 00:21:01.080
+harm may be a double edged sword.
+
+00:21:02.200 --> 00:21:04.120
+The most significant
+
+00:21:04.120 --> 00:21:08.960
+gift of AI is that it makes manifest
+the harms that have been
+
+00:21:09.520 --> 00:21:12.880
+dismissed as unscientific concerns.
+
+00:21:13.520 --> 00:21:16.560
+It gives us an opportunity to step back
+
+00:21:17.080 --> 00:21:22.240
+and reconsider what we want to automate
+or what we want to accelerate.
+
+00:21:22.960 --> 00:21:26.320
+It makes us consider what we mean by best,
+
+00:21:26.320 --> 00:21:30.920
+by optimal, truth, democracy,
+planning, efficiency,
+
+00:21:31.280 --> 00:21:36.080
+fairness, progress, and the common good.
+
+00:21:38.680 --> 00:21:41.840
+Some of
+the things we've done within my unit
+
+00:21:41.840 --> 00:21:46.160
+to provoke this rethinking
+include our inverted word cloud,
+
+00:21:46.840 --> 00:21:49.520
+which is a tiny little mechanism.
+
+00:21:49.880 --> 00:21:53.840
+A conventional word cloud increases
+the size and centrality
+
+00:21:53.840 --> 00:21:57.120
+of the most popular
+or statistically frequent words.
+
+00:21:57.440 --> 00:22:01.640
+The less popular or outlying words
+decrease in size and disappear.
+
+00:22:02.160 --> 00:22:04.800
+We've simply inverted that behavior.
+
+00:22:04.800 --> 00:22:08.120
+The novel
+and the unique words go to the center
+
+00:22:08.320 --> 00:22:11.480
+and grow in size.
+ +00:22:12.120 --> 00:22:16.520 +We've been trying to provocate +with models like the Lawnmower of Justice, + +00:22:16.880 --> 00:22:20.720 +where we take the top off +the Gaussian curve or the bell curve + +00:22:21.000 --> 00:22:23.960 +as it might be called, +to remove the privilege + +00:22:24.320 --> 00:22:26.360 +of being the same as the majority. + +00:22:26.800 --> 00:22:33.280 +So the model needs to pay greater +attention to the breadth of data, + +00:22:33.280 --> 00:22:37.600 +and we're exploring +bottom up community led data ecosystems + +00:22:37.600 --> 00:22:40.680 +where the members govern +and share in the value of the data. + +00:22:41.080 --> 00:22:44.960 +This fills the gap left by things +like impact investing. + +00:22:45.160 --> 00:22:48.600 +For example, +when social entrepreneurship efforts + +00:22:48.600 --> 00:22:52.280 +that are supposedly addressing +these problems can't scale + +00:22:52.280 --> 00:22:56.400 +a single impactful formula +sufficiently to garner support. + +00:22:56.760 --> 00:23:02.120 +It also works well to grow knowledge +of things like rare illnesses that + +00:23:02.120 --> 00:23:06.240 +won't garner a market for the treatments +and therefore are not invested in + +00:23:09.240 --> 00:23:09.960 +And create, + +00:23:09.960 --> 00:23:14.920 +we're also creating tools to reduce harm +by signaling when a model will be wrong + +00:23:14.920 --> 00:23:18.560 +or unreliable, because the evidence +based guidance is wrong + +00:23:18.880 --> 00:23:20.880 +for the person being decided about. + +00:23:21.640 --> 00:23:24.920 +Here we're using a tool called the Data +Set Nutrition label + +00:23:25.280 --> 00:23:31.480 +that gives information about what +data is used to train the model. + +00:23:31.480 --> 00:23:35.640 +But back to the axemaker’s gifts +and the opportunity to reconsider + +00:23:35.640 --> 00:23:36.800 +where we're going. + +00:23:36.800 --> 00:23:41.880 +From a complexity theory perspective, +where I think we're collectively stuck + +00:23:41.880 --> 00:23:46.680 +on a local optima and unable +to unlearn our fundamental assumptions + +00:23:46.920 --> 00:23:50.120 +and approach approaches +to find the global optimal. + +00:23:51.080 --> 00:23:53.880 +And I believe there is a global optima + +00:23:55.280 --> 00:23:56.720 +At the moment, + +00:23:56.720 --> 00:24:00.440 +as a society, +we believe, or we act like we believe, + +00:24:00.760 --> 00:24:03.080 +to succeed, +we need to do what we've been doing + +00:24:03.080 --> 00:24:06.800 +more effectively, efficiently, +accurately and consistently. + +00:24:07.280 --> 00:24:12.120 +We're hill climbing, optimizing the +patterns of the past, eroding the slope. + +00:24:12.560 --> 00:24:16.280 +For anyone following us, +we need to stop doing + +00:24:16.280 --> 00:24:20.120 +the same things more efficiently +and potentially reverse course. + +00:24:21.280 --> 00:24:23.720 +I've been +considering the many local optima + +00:24:24.520 --> 00:24:27.560 +we keep hill climbing, +not just statistical reasoning + +00:24:27.560 --> 00:24:32.800 +that finds a single winning answer, +not just winner takes all zero sum game + +00:24:32.800 --> 00:24:38.120 +capitalism and economic growth +at all costs, but also majority rules. + +00:24:38.120 --> 00:24:40.040 +All or nothing decisions. 
+
+00:24:40.040 --> 00:24:43.520
+And even in our community,
+this accessibility community,
+
+00:24:43.840 --> 00:24:47.000
+the notion of a single checklist
+of full accessibility
+
+00:24:47.400 --> 00:24:52.000
+for a group of hugely diverse people,
+many of whom are not represented
+
+00:24:52.240 --> 00:24:55.360
+when coming up with the list,
+
+00:24:55.400 --> 00:24:58.800
+the people closest to the bottom
+are more diverse,
+
+00:24:59.120 --> 00:25:02.840
+closest to the path we need to follow
+to find the global optima.
+
+00:25:03.280 --> 00:25:07.720
+Less invested in current conventions,
+we need to diversify
+
+00:25:07.720 --> 00:25:10.080
+and learn to use our complementary skills
+
+00:25:10.320 --> 00:25:13.040
+and learn from people
+who are currently marginalized.
+
+00:25:13.680 --> 00:25:18.960
+Even in this community,
+focused on accessibility.
+
+00:25:18.960 --> 00:25:24.680
+Because if anyone knows,
+we know that it is at the margins or outer
+
+00:25:24.680 --> 00:25:29.200
+edge of our human starburst
+that we find the greatest innovation
+
+00:25:29.200 --> 00:25:33.160
+and the weakest
+and the weak signals of crisis to come.
+
+00:25:33.760 --> 00:25:38.360
+This is where you feel the extremes
+of both the opportunities and the risks.
+
+00:25:41.120 --> 00:25:45.000
+One of the emerging uncertainties
+that holds
+
+00:25:45.000 --> 00:25:48.760
+both greater
+opportunities and risks is generative AI.
+
+00:25:49.520 --> 00:25:52.240
+What are the implications
+if you have a disability?
+
+00:25:52.280 --> 00:25:54.320
+What will it do for accessibility?
+
+00:25:55.080 --> 00:25:59.200
+I'm sure you've heard about tools
+like GPT, ChatGPT,
+
+00:25:59.800 --> 00:26:03.840
+Stable Diffusion and various
+versions of DALL-E,
+
+00:26:05.480 --> 00:26:08.480
+Midjourney and other emerging tools.
+
+00:26:08.520 --> 00:26:11.840
+Even today there are new announcements
+regarding new tools.
+
+00:26:12.280 --> 00:26:16.720
+These tools do not rely purely on
+statistical reasoning.
+
+00:26:17.080 --> 00:26:20.360
+They can transfer learning
+from context to context.
+
+00:26:20.760 --> 00:26:24.320
+They use new processes
+called transformers
+
+00:26:24.320 --> 00:26:27.080
+that can pivot to new applications,
+
+00:26:27.640 --> 00:26:31.640
+but they can also create
+convincing and toxic lies.
+
+00:26:31.640 --> 00:26:34.520
+People with disabilities
+tend to be most vulnerable
+
+00:26:34.760 --> 00:26:37.800
+to the misuse and abuse of toxic tools.
+
+00:26:38.520 --> 00:26:40.880
+I'm going to invite Shari
+to help me discuss
+
+00:26:40.880 --> 00:26:44.800
+these emerging possibilities.
+
+00:26:50.480 --> 00:26:51.560
+Hello.
+
+00:26:51.560 --> 00:26:52.960
+Hello, everybody.
+
+00:26:52.960 --> 00:26:54.600
+I'm Shari Trewin.
+
+00:26:54.600 --> 00:26:57.120
+I'm from Google.
+
+00:26:57.440 --> 00:27:00.120
+And a middle aged white woman
+
+00:27:00.120 --> 00:27:04.120
+with lots of smile lines on my face.
+
+00:27:09.400 --> 00:27:15.280
+So, Jutta, you’ve given us a lot to think about there.
+
+00:27:16.800 --> 00:27:21.640
+I wonder if we might start off
+where you ended there.
+
+00:27:21.640 --> 00:27:25.000
+Talking a little bit
+about generative A.I.
+ +00:27:25.040 --> 00:27:28.280 +models and language models + +00:27:29.360 --> 00:27:30.200 +and the + +00:27:30.200 --> 00:27:34.760 +they're trained on large corpora of data +that may not reflect + +00:27:35.120 --> 00:27:38.920 +the the moral values that we would like + +00:27:39.120 --> 00:27:41.360 +our models to incorporate. + +00:27:42.280 --> 00:27:45.080 +So one question I think +would be interesting for us to talk about + +00:27:45.080 --> 00:27:50.200 +is can we teach these large language +models or generative A.I. + +00:27:50.520 --> 00:27:52.960 +to apply these moral values, + +00:27:53.360 --> 00:27:56.880 +even though the +the very large datasets may not + +00:27:58.040 --> 00:28:00.680 +represent that? + +00:28:00.760 --> 00:28:02.600 +That's a great question. + +00:28:02.600 --> 00:28:06.240 +And in thinking +about how that might be done. + +00:28:07.480 --> 00:28:08.120 +One of + +00:28:08.120 --> 00:28:11.840 +the dilemmas +is that we may need to find a way + +00:28:11.840 --> 00:28:15.720 +to quantify complex, +abstract, qualitative values. + +00:28:16.760 --> 00:28:20.600 +And in that process, +will that reduce these values? + +00:28:21.320 --> 00:28:24.400 +I mean, deep learning lacks judgment. + +00:28:24.440 --> 00:28:28.400 +Humans sort of value human judgment +that isn't quantitative. + +00:28:29.120 --> 00:28:31.960 +Perhaps one way to start is + +00:28:31.960 --> 00:28:34.640 +by recognizing human diversity + +00:28:35.760 --> 00:28:38.720 +and the diversity of contexts. + +00:28:38.720 --> 00:28:41.000 +There is a lot of talk about + +00:28:41.960 --> 00:28:44.080 +individualizing applications + +00:28:44.440 --> 00:28:47.280 +without making the costs exorbitant + +00:28:47.760 --> 00:28:50.480 +and to the people that need them. + +00:28:51.200 --> 00:28:55.120 +The irony, of course, in that +is that the people that need + +00:28:55.120 --> 00:28:59.360 +that type of individualization +the most are also most likely + +00:28:59.360 --> 00:29:01.960 +to be the people +that can't afford it. + +00:29:02.720 --> 00:29:06.760 +And I think it's not yet known. + +00:29:06.760 --> 00:29:08.960 +Can we do that? + +00:29:08.960 --> 00:29:11.960 +And of course, +there's been surprising advances + +00:29:11.960 --> 00:29:17.120 +in all sorts of different areas +with respect to AI and generative AI. + +00:29:17.960 --> 00:29:22.560 +But I think this is the issue of values + +00:29:22.760 --> 00:29:27.400 +and shared values and the the articulation + +00:29:27.640 --> 00:29:33.760 +and making mechanizable, because, +of course, we're talking about a machine + +00:29:33.760 --> 00:29:38.280 +and mechanization values +that that we have difficulty + +00:29:38.280 --> 00:29:42.640 +even fully +expressing is it's quite a challenge. + +00:29:42.680 --> 00:29:43.960 +What do you think, Shari? + +00:29:45.320 --> 00:29:45.880 +Now, I + +00:29:45.880 --> 00:29:48.440 +think that +you've hit a really good point there with + +00:29:49.040 --> 00:29:52.440 +can we express or can we measure + +00:29:53.120 --> 00:29:57.400 +whether a model meets our values +or whether + +00:29:58.760 --> 00:30:01.640 +we think it is free from bias + +00:30:01.640 --> 00:30:04.360 +or as free from bias as we can make it? + +00:30:05.120 --> 00:30:06.440 +Do we know? + +00:30:06.440 --> 00:30:07.880 +Do we know how to evaluate? + +00:30:07.880 --> 00:30:10.520 +That I think is an important question. 
+ +00:30:11.480 --> 00:30:15.320 +And some of the steps +that that often get missed + +00:30:15.320 --> 00:30:19.600 +when creating a system that uses AI might + +00:30:21.280 --> 00:30:24.680 +that might help with +that would be starting off + +00:30:24.680 --> 00:30:28.600 +from the beginning by thinking about +who are the people + +00:30:29.000 --> 00:30:34.000 +who might be at risk, what are the issues +that might be in the data? + +00:30:34.000 --> 00:30:39.800 +What historical biases +might the data represent and include? + +00:30:40.040 --> 00:30:44.280 +And then actively working with members +of those communities + +00:30:44.640 --> 00:30:48.960 +to understand how are we going to measure +fairness here? + +00:30:49.080 --> 00:30:50.880 +How are we going to measure bias? + +00:30:50.880 --> 00:30:52.200 +What's our goal? + +00:30:52.200 --> 00:30:54.280 +And how are we going to test? + +00:30:54.280 --> 00:30:57.560 +How are we going to know +when we've achieved our goal? + +00:30:59.160 --> 00:31:01.160 +So I think there's some + +00:31:01.400 --> 00:31:04.040 +progress +that can be made in the design process + +00:31:04.040 --> 00:31:08.560 +and thinking about the larger system +that we're embedding AI in, + +00:31:09.640 --> 00:31:13.960 +everything +doesn't have to be built into one AI model + +00:31:14.480 --> 00:31:18.640 +and we can augment models, +We can build systems around models + +00:31:18.800 --> 00:31:22.040 +that take into account +their limitations and + +00:31:23.360 --> 00:31:26.160 +create a better overall whole system. + +00:31:27.080 --> 00:31:31.680 +So thinking about what +the models are currently trained on + +00:31:31.680 --> 00:31:35.360 +and just the masses of data +that are used to build the models + +00:31:36.080 --> 00:31:39.360 +and the training data is rife + +00:31:39.360 --> 00:31:42.400 +with discrimination against difference. + +00:31:42.400 --> 00:31:46.520 +So how do we how do they unlearn? + +00:31:46.560 --> 00:31:51.920 +I mean, this is it's sort of +it matches some of the training + +00:31:51.920 --> 00:31:55.400 +that I do within my program and that + +00:31:56.720 --> 00:32:00.000 +students have been socialized +with very similar things. + +00:32:00.000 --> 00:32:03.600 +And then often the issue is not learning. + +00:32:03.600 --> 00:32:09.200 +The issue is unlearning, like +how do you remove those those unconscious + +00:32:10.520 --> 00:32:13.480 +habituated values that that + +00:32:14.600 --> 00:32:18.000 +are so embedded in our learning systems? + +00:32:18.360 --> 00:32:23.240 +So it is I agree that there is is huge + +00:32:23.240 --> 00:32:28.040 +opportunity, +especially with more context aware systems + +00:32:28.480 --> 00:32:31.400 +and maybe what we need to pursue + +00:32:31.400 --> 00:32:34.920 +is even to address things like + +00:32:35.960 --> 00:32:39.760 +privacy +and the need to swim against this massive + +00:32:39.760 --> 00:32:43.320 +amount of data +that is not applicable to you + +00:32:43.520 --> 00:32:46.440 +is on device + +00:32:46.440 --> 00:32:51.080 +personalized or not personalized, +because personalized is a term + +00:32:51.080 --> 00:32:54.440 +that's also sort of been hijacked to mean + +00:32:54.440 --> 00:32:57.640 +cushioning but individualized. + +00:32:57.640 --> 00:33:02.080 +Let's use that term system +that takes your data + +00:33:02.080 --> 00:33:04.840 +and creates a bottom up picture + +00:33:05.080 --> 00:33:07.000 +of what is needed. 
+ +00:33:07.880 --> 00:33:10.000 +Yeah, I think, you know, there + +00:33:10.200 --> 00:33:15.160 +there is definitely interesting avenues +to explore with transfer learning. + +00:33:15.160 --> 00:33:19.080 +And can we take a model +that's been trained on data + +00:33:19.080 --> 00:33:23.360 +and has learned some of the concepts +of the task that we want, but + +00:33:23.600 --> 00:33:26.760 +maybe we'd like it to unlearn +some of the things that it's learned. + +00:33:27.120 --> 00:33:30.880 +Can we use techniques like transfer +learning to layer on top + +00:33:31.360 --> 00:33:34.640 +and and teach the +model and direct the model + +00:33:35.920 --> 00:33:38.200 +more in the direction that we want? + +00:33:38.200 --> 00:33:41.720 +And the +I think the the hopeful thing about that + +00:33:41.720 --> 00:33:44.600 +is that it needs orders of magnitude +less data + +00:33:45.240 --> 00:33:47.800 +to to train such a model. + +00:33:47.800 --> 00:33:50.240 +And so that makes it + +00:33:51.000 --> 00:33:53.720 +a little more achievable, +a little less daunting for + +00:33:54.240 --> 00:33:57.680 +for the community to take on. + +00:33:59.920 --> 00:34:00.640 +Yeah. + +00:34:01.880 --> 00:34:03.960 +Do you think that current + +00:34:04.200 --> 00:34:07.960 +regulation systems +are really up to the task of + +00:34:09.160 --> 00:34:11.920 +regulating current and emerging A.I. + +00:34:12.000 --> 00:34:14.920 +and preventing the kinds of harms +that you've been talking about? + +00:34:16.000 --> 00:34:18.480 +No. A simple answer. + +00:34:18.560 --> 00:34:20.480 +I don't think so. + +00:34:20.480 --> 00:34:23.360 +I mean, there's so many issues. + +00:34:23.360 --> 00:34:27.080 +Laws and policies are developed +at a much slower pace + +00:34:28.400 --> 00:34:28.880 +They’re. + +00:34:28.880 --> 00:34:31.360 +We're dealing with an uncertain + +00:34:31.360 --> 00:34:34.480 +very, very quickly +moving, quickly adapting area. + +00:34:35.120 --> 00:34:38.480 +And when laws are + +00:34:40.360 --> 00:34:42.520 +well, they need to be testable. + +00:34:42.520 --> 00:34:45.200 +And so in order to be testable, +we have to create + +00:34:45.200 --> 00:34:48.920 +these static rules that can be tested, +which means we have to be + +00:34:50.200 --> 00:34:51.560 +fairly specific as + +00:34:51.560 --> 00:34:54.400 +opposed to sort of general and abstract. + +00:34:55.000 --> 00:34:59.120 +And that tends to lead us +towards one size fits, + +00:34:59.120 --> 00:35:03.800 +one criteria +which we know are are not great. + +00:35:03.920 --> 00:35:07.800 +If we're trying to design for diversity +or encourage diversity. + +00:35:08.760 --> 00:35:12.200 +I think we, one of the things +we need to innovate in + +00:35:12.200 --> 00:35:17.560 +is the regulatory instruments +that we can use here. + +00:35:18.400 --> 00:35:21.600 +What's your thinking about this? + +00:35:21.600 --> 00:35:24.000 +Yeah, I think some of those + +00:35:24.000 --> 00:35:27.880 +regulatory instruments +that we have do apply. + +00:35:28.280 --> 00:35:31.240 +So if you're a company that's using a + +00:35:31.560 --> 00:35:34.520 +a AI system in screening job + +00:35:34.520 --> 00:35:37.760 +applicants, + +00:35:37.760 --> 00:35:40.880 +the disability discrimination laws +still apply to you. 
+ +00:35:41.320 --> 00:35:43.680 +Somebody can still bring a lawsuit +against you + +00:35:44.720 --> 00:35:48.920 +saying that your system discriminated +against them and you are still liable + +00:35:49.400 --> 00:35:53.680 +to defend against that and to watch out + +00:35:53.680 --> 00:35:56.680 +for those kinds of issues. + +00:35:56.800 --> 00:36:01.160 +So in in some ways, +there are important pieces + +00:36:01.520 --> 00:36:06.440 +that we need in place +that can be used to tackle problems + +00:36:06.440 --> 00:36:09.800 +introduced when AI systems are introduced. + +00:36:10.520 --> 00:36:11.760 +But then in other ways, + +00:36:12.760 --> 00:36:14.400 +there was a lot more of a gray area. + +00:36:14.400 --> 00:36:16.720 +When the technology is not making + +00:36:17.840 --> 00:36:19.760 +discriminatory decisions, but + +00:36:19.760 --> 00:36:22.400 +it still might make harmful mistakes + +00:36:22.760 --> 00:36:25.520 +or that mislead people + +00:36:26.000 --> 00:36:29.400 +or that people are relying on it for. + +00:36:29.400 --> 00:36:34.400 +And so, you know, if anybody here has +a legal background, I would love to hear + +00:36:35.480 --> 00:36:38.720 +their take as well on + +00:36:39.000 --> 00:36:42.320 +how well do +current consumer protections apply. + +00:36:42.520 --> 00:36:45.080 +For example, if you're using an AI. + +00:36:45.880 --> 00:36:49.240 +I've become aware of and worried + +00:36:49.240 --> 00:36:52.440 +about the people for whom +the law isn't adequate. + +00:36:52.960 --> 00:36:57.680 +So the fact that we have a law, +the fact that we supposedly have measures + +00:36:57.680 --> 00:37:02.240 +that prevent abuse or unethical practice, + +00:37:02.640 --> 00:37:05.320 +if that if you are still + +00:37:05.760 --> 00:37:10.040 +being treated unethically, +then it makes it even harder for you. + +00:37:10.520 --> 00:37:16.160 +So I think the the measures that we do +have, the regulations that we do + +00:37:16.160 --> 00:37:20.000 +have have to have some way of continuously + +00:37:20.480 --> 00:37:23.600 +being iterated upon so that we can catch + +00:37:24.560 --> 00:37:27.080 +the individuals that are not included. + +00:37:27.080 --> 00:37:29.760 +And we have to recognize that + +00:37:29.760 --> 00:37:33.320 +our supposed solutions +are actually not solutions, + +00:37:33.320 --> 00:37:36.040 +that this is never fixed, that it's + +00:37:36.320 --> 00:37:38.640 +it requires this ongoing vigilance. + +00:37:39.200 --> 00:37:45.240 +And so the yeah, there's +that there's much more to say about that. + +00:37:45.240 --> 00:37:46.160 +But yes, you're right. + +00:37:46.160 --> 00:37:48.680 +It would be great to hear from any + +00:37:49.520 --> 00:37:55.640 +anyone with a legal background. + +00:37:55.640 --> 00:37:59.080 +Yeah. + +00:37:59.080 --> 00:38:02.760 +Let's maybe talk a little bit about it's + +00:38:03.080 --> 00:38:05.280 +a bit more about generative AI + +00:38:05.840 --> 00:38:09.320 +that you mentioned at the end there. + +00:38:09.320 --> 00:38:11.800 +Know it. It + +00:38:12.000 --> 00:38:17.120 +produces very plausible convincing + +00:38:19.640 --> 00:38:22.400 +statements when when asked the question, + +00:38:22.640 --> 00:38:26.360 +but it also very plausibly +and convincingly, + +00:38:26.360 --> 00:38:31.600 +completely makes things up +and isn't always reliable. 
+ +00:38:31.600 --> 00:38:34.720 +And in fact right now is not connected + +00:38:35.040 --> 00:38:38.080 +to any form of ground truth or + +00:38:39.160 --> 00:38:41.600 +able to assess the accuracy of what makes + +00:38:42.880 --> 00:38:45.080 +the one +question I think that's interesting is + +00:38:45.320 --> 00:38:48.680 +will this technology reach a stage where + +00:38:50.280 --> 00:38:52.480 +it can support + +00:38:52.480 --> 00:38:58.160 +the kinds of decisions that we're using, +statistical reasoning for right now? + +00:38:58.160 --> 00:39:02.000 +Eventually, obviously, right now it's it's +not there yet. + +00:39:03.600 --> 00:39:03.920 +Yeah. + +00:39:03.920 --> 00:39:08.680 +And it's interesting because just recently +there have been the announcements + +00:39:09.120 --> 00:39:12.240 +of these systems +being used for medical guidance + +00:39:13.520 --> 00:39:15.440 +using large + +00:39:15.440 --> 00:39:19.960 +language models to come up with answers +to your medical questions, + +00:39:20.440 --> 00:39:23.600 +which of course is + +00:39:23.600 --> 00:39:24.480 +quite. + +00:39:25.040 --> 00:39:27.360 +Yeah, it'll be interesting +to see what happens. + +00:39:28.040 --> 00:39:31.160 +It's scary, I think is exactly scary. + +00:39:31.400 --> 00:39:36.560 +And what about the the the medical advice +given to someone where + +00:39:37.320 --> 00:39:42.040 +within the data set that is provided, +there isn't a lot of advice. + +00:39:42.040 --> 00:39:44.560 +So that given that the system does + +00:39:45.200 --> 00:39:49.480 +I mean ask any of the LLMs +or the chat bots + +00:39:49.880 --> 00:39:53.400 +how confident they are in their +their answers. + +00:39:53.400 --> 00:39:54.160 +They'll answer that + +00:39:54.160 --> 00:39:57.320 +they are confident +because there isn't a sense of + +00:39:58.520 --> 00:40:00.400 +what is the the risk level, + +00:40:00.400 --> 00:40:05.000 +what is the the confidence level +of this particular response. + +00:40:05.000 --> 00:40:10.280 +There +there's no self-awareness of what is wrong + +00:40:10.280 --> 00:40:13.680 +and what is right and what is the context +that is in front of me. + +00:40:14.560 --> 00:40:16.640 +Yeah, I think that's actually +a great opportunity. + +00:40:16.640 --> 00:40:21.000 +There to explore whether we can enable + +00:40:22.520 --> 00:40:25.360 +models a little better to know +what they don't know, + +00:40:26.000 --> 00:40:29.320 +to know when the case with right now + +00:40:29.840 --> 00:40:35.560 +isn't well-represented in their models +or maybe an outlier case. + +00:40:35.880 --> 00:40:38.000 +That they + +00:40:39.920 --> 00:40:41.120 +should perhaps pass on + +00:40:41.120 --> 00:40:45.880 +to some other form of decision +making or at least convey + +00:40:46.520 --> 00:40:48.560 +the less confidence + +00:40:48.960 --> 00:40:53.200 +in the voice of their, you know, + +00:40:53.480 --> 00:40:55.640 +I think generative AI today + +00:40:56.560 --> 00:41:00.440 +gives us a glimpse of the future +and the kinds of interactions + +00:41:00.680 --> 00:41:05.120 +that are possible, the kinds of ways +we might interact with technology + +00:41:05.440 --> 00:41:07.000 +in the future. + +00:41:07.000 --> 00:41:11.000 +You know, clearly there's +a research priority to ground it better. + +00:41:11.040 --> 00:41:12.640 +In truth, and + +00:41:13.880 --> 00:41:14.360 +it needs to + +00:41:14.360 --> 00:41:17.840 +be much more reliable, +much more trustworthy, much more accurate. 
+ +00:41:18.320 --> 00:41:24.680 +But then you go to the +it can support serious applications. + +00:41:24.680 --> 00:41:28.160 +And the idea of +using it to get medical advice just is + +00:41:28.840 --> 00:41:32.000 +that that's a very, very scary + +00:41:32.360 --> 00:41:35.120 +because it is so eloquent + +00:41:35.840 --> 00:41:40.320 +that it's immediately trustworthy +and it gets enough things + +00:41:40.320 --> 00:41:44.200 +right +that we begin to trust it very quickly. + +00:41:44.200 --> 00:41:46.600 +And so in some ways, the the + +00:41:47.600 --> 00:41:50.680 +the advances that have been made are + +00:41:52.760 --> 00:41:54.440 +it's it's so good + +00:41:54.440 --> 00:41:58.720 +that it really highlights the dangers, + +00:41:58.720 --> 00:42:07.560 +I think, more effectively. + +00:42:07.560 --> 00:42:10.120 +So, yeah, I think that's interesting +to think about + +00:42:10.120 --> 00:42:12.920 +what would a human A.I. + +00:42:13.000 --> 00:42:17.080 +interaction look like in the future? + +00:42:17.080 --> 00:42:20.640 +Would would we need to change train + +00:42:21.720 --> 00:42:26.600 +or was one to divide by bias + +00:42:26.840 --> 00:42:31.080 +and kind of work with a larger language + +00:42:31.080 --> 00:42:34.360 +model to adapt responses? + +00:42:34.360 --> 00:42:35.600 +Would we you know how + +00:42:37.120 --> 00:42:40.040 +automatic +image description has sort of evolved? + +00:42:40.040 --> 00:42:41.360 +At first it would + +00:42:41.360 --> 00:42:45.440 +you know, we would throw out words +and that might be in the picture. + +00:42:45.440 --> 00:42:48.440 +And sometimes it was right +and sometimes it would be wrong. + +00:42:48.800 --> 00:42:53.240 +And now you see these generated +alternative text + +00:42:53.720 --> 00:42:58.320 +being phrased in a way +that conveys the uncertainty. + +00:42:58.320 --> 00:43:01.800 +So could be a tree or something like that. + +00:43:01.800 --> 00:43:05.120 +And I think the large language models +could do something + +00:43:05.120 --> 00:43:08.840 +similar to + +00:43:08.840 --> 00:43:11.360 +reduce the chances of misleading people. + +00:43:11.360 --> 00:43:15.440 +So they might say things +like many people seem to think + +00:43:16.040 --> 00:43:19.280 +blah, blah, blah, +or get better at citing sources. + +00:43:19.520 --> 00:43:23.240 +I think there's a lot of ways +that we can use + +00:43:23.240 --> 00:43:25.640 +these in direct research to, + +00:43:27.160 --> 00:43:32.480 +you know, to overcome some of the really +obvious failings that are there right now. + +00:43:32.480 --> 00:43:38.280 +But the limitations +that we currently have, + +00:43:38.280 --> 00:43:44.040 +Mark Urban has shared +in in the chat that I can see + +00:43:45.240 --> 00:43:48.040 +from the US government regulatory side, + +00:43:48.560 --> 00:43:51.160 +much of the current laws + +00:43:51.160 --> 00:43:56.400 +or regulations for access to government +services are about the technical + +00:43:56.400 --> 00:44:00.360 +accessibility of the interfaces +rather than the more + +00:44:01.560 --> 00:44:06.080 +AI focused questions +around system exclusion or mismatch. + +00:44:06.080 --> 00:44:07.440 +So that's + +00:44:08.480 --> 00:44:14.400 +coming back to our point +about the regulatory systems. 
+ +00:44:14.400 --> 00:44:18.760 +And I just noticed that +Mike Calvo says what a Debbie Downer + +00:44:19.400 --> 00:44:23.120 +my talk is, which + +00:44:23.120 --> 00:44:25.520 +I think by design, + +00:44:25.520 --> 00:44:28.320 +we decided between Shari and I that + +00:44:28.760 --> 00:44:31.360 +I would provide the warnings + +00:44:31.480 --> 00:44:34.360 +and then Shari would provide optimism. + +00:44:36.120 --> 00:44:38.480 +I say I get the best of their. + +00:44:41.480 --> 00:44:44.120 +I think there are + +00:44:44.120 --> 00:44:47.480 +quite a few questions +in the question and answer panel. Yes. + +00:44:47.680 --> 00:44:51.040 +And so maybe what we should do is... + +00:44:51.160 --> 00:44:52.640 +Over there's so many things + +00:44:52.640 --> 00:44:56.480 +to explore with the emerging models +and so many uncertainties, + +00:44:56.800 --> 00:45:03.680 +but there's some great questions +there as well. + +00:45:04.320 --> 00:45:06.000 +Yeah, How about + +00:45:10.520 --> 00:45:13.880 +jumping around on me... + +00:45:13.880 --> 00:45:17.120 +So many new questions. + +00:45:17.120 --> 00:45:22.640 +So I know +this is not in the right order, but + +00:45:24.240 --> 00:45:26.560 +as people are adding questions, +they're kind of + +00:45:26.560 --> 00:45:31.640 +they're kind of jumping. + +00:45:31.640 --> 00:45:33.640 +Okay, so Bruce Bailey is asking. + +00:45:33.640 --> 00:45:35.440 +He says, Fantastic keynote. + +00:45:35.440 --> 00:45:39.200 +Please expound on personalization +having been hijacked + +00:45:39.440 --> 00:45:42.320 +to mean cushioning + +00:45:42.440 --> 00:45:44.840 +time and perspective as new. + +00:45:44.840 --> 00:45:45.360 +Sure. + +00:45:45.360 --> 00:45:48.560 +Yeah, I can definitely talk about that. + +00:45:48.560 --> 00:45:50.480 +I mean, one of the ways in which + +00:45:50.480 --> 00:45:54.480 +we recognize that we're all diverse, +and especially + +00:45:55.000 --> 00:45:58.520 +if you have a disability, you're diverse +from other people with disabilities + +00:45:58.920 --> 00:46:00.360 +and that our needs are there for + +00:46:00.360 --> 00:46:04.160 +a diverse has been to look at +how do we personalize. + +00:46:04.160 --> 00:46:08.600 +But personalization has been used + +00:46:08.720 --> 00:46:11.360 +as a term to look at + +00:46:13.160 --> 00:46:15.560 +using recommender engines, using + +00:46:15.560 --> 00:46:18.160 +various ways in which we’re offered + +00:46:18.600 --> 00:46:23.840 +only information +and recommendations from people like us, + +00:46:24.200 --> 00:46:28.800 +which of course removes any dissonance +and any diverse + +00:46:28.800 --> 00:46:33.480 +thinking and our exposure +to alternative views and perspectives. + +00:46:34.160 --> 00:46:38.280 +And to some extent it causes us to + +00:46:39.040 --> 00:46:43.120 +it causes greater polarization +because we're also + +00:46:44.000 --> 00:46:46.720 +offered a personalized view + +00:46:46.720 --> 00:46:49.520 +of the current stance that we're taking + +00:46:49.880 --> 00:46:52.720 +so that it gets confirmed again +and again and again. + +00:46:53.080 --> 00:46:56.480 +So I'm not talking about that type +of personalization. 
+ +00:46:56.840 --> 00:47:01.920 +I'm talking about the type +of personalization where the interface + +00:47:02.480 --> 00:47:06.320 +makes it easier +for us to participate and addresses + +00:47:06.320 --> 00:47:09.720 +our specific, very diverse requirements +with respect + +00:47:10.120 --> 00:47:12.760 +to that participation so that + +00:47:13.760 --> 00:47:17.480 +I've moved +away from the term personalization simply + +00:47:17.480 --> 00:47:22.760 +because I don't want it to be mistaken +for the type of personalization + +00:47:23.120 --> 00:47:27.080 +that cushions us away from diverse +perspectives. + +00:47:27.080 --> 00:47:32.160 +Because certainly we need to be exposed +to those that diversity of perspectives, + +00:47:33.080 --> 00:47:35.600 +and we need to consider + +00:47:35.600 --> 00:47:40.160 +the diverse stories that people have. + +00:47:40.160 --> 00:47:47.080 +You know, I think personalization, +personalization is + +00:47:48.440 --> 00:47:52.480 +really part of accessibility in general, + +00:47:52.480 --> 00:47:55.560 +but there's + +00:47:55.560 --> 00:47:58.760 +you know, you're talking about +a particular kind of personalization, + +00:47:59.160 --> 00:48:00.520 +but AI personalization. + +00:48:00.520 --> 00:48:03.640 +I'm going to talk a little bit more +in the keynote at the end about an example + +00:48:03.640 --> 00:48:06.920 +of AI personalization +of personalized models + +00:48:06.920 --> 00:48:09.440 +that are + +00:48:12.800 --> 00:48:14.560 +providing personalized access + +00:48:14.560 --> 00:48:19.040 +to digital content, which I think +is a good use of personalization. + +00:48:19.400 --> 00:48:20.880 +Yeah, Yeah. + +00:48:21.800 --> 00:48:25.040 +So Carve or Convey + +00:48:25.520 --> 00:48:30.200 +from EDF says thank you +Jutta for this important keynote. + +00:48:30.200 --> 00:48:34.400 +I've seen different toolkits +to test and mitigate bias in AI. + +00:48:34.400 --> 00:48:39.640 +What is your view on them +and their usefulness? + +00:48:39.640 --> 00:48:42.080 +Right. So we've been doing a + +00:48:43.160 --> 00:48:44.280 +well actually + +00:48:44.280 --> 00:48:48.840 +as part of a number of our projects, +including ODD, which is optimizing + +00:48:48.840 --> 00:48:52.520 +diversity with disability +and our We Count project, + +00:48:52.800 --> 00:48:57.400 +we've been looking at a variety of A.I. + +00:48:57.400 --> 00:49:02.360 +ethics, auditing tools, but +also we've done sort of the secret shopper + +00:49:03.080 --> 00:49:05.960 +test of employment tools + +00:49:06.320 --> 00:49:11.680 +and then seen whether we can detect the +the particular biases + +00:49:12.000 --> 00:49:16.640 +that unwanted biases +as is is made clear by AI. + +00:49:16.640 --> 00:49:20.480 +So I mean bias of course, +the tools are intended to be biased. + +00:49:21.080 --> 00:49:24.680 +And so it's +the unwanted bias as a proviso. + +00:49:24.960 --> 00:49:27.760 +And what we find is that + +00:49:28.120 --> 00:49:30.880 +they're great at cluster analysis + +00:49:31.640 --> 00:49:34.400 +and then they they supplement the cluster + +00:49:34.400 --> 00:49:39.120 +analysis with a number of questions +that is + +00:49:39.120 --> 00:49:42.880 +asked of the implementer of the system. + +00:49:43.320 --> 00:49:46.440 +So the primary technical key to to + +00:49:46.440 --> 00:49:51.440 +the tools is determining +whether there is unfair + +00:49:51.440 --> 00:49:54.560 +treatment of one +bounded group with another. 
+ +00:49:54.560 --> 00:49:57.040 +And that works +well if you have something like + +00:49:57.840 --> 00:50:02.760 +determining whether there's discrimination +regarding gender or discrimination + +00:50:03.440 --> 00:50:08.400 +regarding declared race or language +or those sorts of things + +00:50:08.640 --> 00:50:13.200 +which do cluster well, but it doesn't + +00:50:13.400 --> 00:50:18.560 +none of the tools really detect +whether there is discrimination + +00:50:19.600 --> 00:50:22.120 +based upon disability. + +00:50:22.120 --> 00:50:25.160 +And and the + +00:50:25.160 --> 00:50:27.400 +because the the particular + +00:50:28.680 --> 00:50:33.760 +discriminating characteristics +are so diffuse and different + +00:50:33.760 --> 00:50:38.960 +from person to person, +we can't see how it would be possible + +00:50:39.320 --> 00:50:42.560 +in a litigation perspective + +00:50:42.560 --> 00:50:45.080 +or in a regulatory perspective to prove + +00:50:45.560 --> 00:50:49.080 +that you have been discriminated +against it. + +00:50:49.240 --> 00:50:53.080 +It's going to be very, +very difficult to come up with that proof + +00:50:53.560 --> 00:50:57.440 +because the +the particular characteristics are + +00:50:59.080 --> 00:51:01.320 +themselves so entangled and diffuse. + +00:51:01.640 --> 00:51:05.600 +And so it may not be one +particular characteristic associated + +00:51:05.600 --> 00:51:08.240 +with your disability +that you would use to say, + +00:51:08.240 --> 00:51:11.480 +well, look at here, I'm +being discriminated against + +00:51:11.480 --> 00:51:16.640 +because of this characteristic +that relates to my disability. + +00:51:16.640 --> 00:51:17.000 +Yeah. + +00:51:17.000 --> 00:51:20.920 +So I think there are a lot +of the toolkits. + +00:51:21.320 --> 00:51:24.360 +Many of the methods in the toolkit +are group fairness + +00:51:24.840 --> 00:51:27.200 +metrics like, like you say, where. + +00:51:28.480 --> 00:51:30.920 +And that's an important thing to measure. + +00:51:30.920 --> 00:51:34.560 +It is when we when we do have the ability + +00:51:34.560 --> 00:51:39.320 +to identify groups and to know for sure +who's in which group and which one, + +00:51:39.960 --> 00:51:43.520 +the boundaries of these groups are always +not fuzzy. + +00:51:43.520 --> 00:51:46.080 +You know, there's the there's + +00:51:47.440 --> 00:51:50.080 +a deeply embedded assumption that there's + +00:51:50.080 --> 00:51:54.000 +only two genders, for example, +in a lot of the data. + +00:51:54.000 --> 00:51:56.000 +And many of these tools. + +00:51:56.000 --> 00:52:01.480 +So they have their problems +and this ability exemplifies this. + +00:52:01.640 --> 00:52:04.720 +The same problem. Yeah, same problems. + +00:52:04.920 --> 00:52:09.240 +But there are also individual +fairness metrics and measures, + +00:52:09.600 --> 00:52:13.240 +and some of the toolkits +include some of these kinds of measures. + +00:52:13.240 --> 00:52:16.800 +And so instead of asking, +is this group as a whole, + +00:52:17.000 --> 00:52:21.080 +treated equivalently to this other group, +they ask + +00:52:21.720 --> 00:52:26.200 +is a similar +are similar individuals treated similarly? 
+ +00:52:26.200 --> 00:52:29.320 +And so you could imagine with an approach +like that, + +00:52:29.720 --> 00:52:33.920 +if I as an individual with my unique data, + +00:52:34.920 --> 00:52:35.520 +I could make a + +00:52:35.520 --> 00:52:38.000 +case that I was discriminated against by + +00:52:39.400 --> 00:52:41.880 +creating another person + +00:52:42.200 --> 00:52:45.080 +who was similar to me in the respects + +00:52:45.080 --> 00:52:47.440 +that are important for this job. + +00:52:47.440 --> 00:52:49.240 +Mm hmm. Yeah. + +00:52:49.720 --> 00:52:53.240 +And. And see what kind of result +they got compared to my result. + +00:52:53.240 --> 00:52:56.480 +And that would be, +you know, a way to measure + +00:52:56.480 --> 00:52:59.800 +individual fairness +and and build up a case. + +00:53:00.400 --> 00:53:01.640 +Yes. Yeah. + +00:53:01.640 --> 00:53:05.480 +Unfortunately, there's not that +many schools that that currently do that. + +00:53:05.480 --> 00:53:08.280 +And the certification systems +that currently exist + +00:53:08.640 --> 00:53:12.720 +are not implementing those +so that there is much to work on there. + +00:53:13.400 --> 00:53:14.000 +Yeah. + +00:53:14.000 --> 00:53:17.240 +Think it's sort of more of a case, a + +00:53:21.000 --> 00:53:22.160 +case by case basis, + +00:53:22.160 --> 00:53:25.320 +but this particular job to me + +00:53:25.320 --> 00:53:29.320 +so it's not so easy +to make a blanket statement about it, + +00:53:29.320 --> 00:53:31.960 +but I think it's not impossible to + +00:53:32.840 --> 00:53:38.000 +assess. + +00:53:38.000 --> 00:53:39.400 +Okay, so + +00:53:43.000 --> 00:53:44.480 +do we have time for one more? + +00:53:44.480 --> 00:53:44.920 +How long? + +00:53:44.920 --> 00:53:47.880 +How much longer do we have +another three minute? + +00:53:48.920 --> 00:53:51.600 +Well, you have almost 10 minutes more, + +00:53:51.600 --> 00:53:54.000 +so you can definitely take one more. + +00:53:55.160 --> 00:53:59.920 +Awesome, great. Um, + +00:54:01.800 --> 00:54:11.240 +let's see. + +00:54:11.240 --> 00:54:15.680 +So Fabian Berger + +00:54:16.640 --> 00:54:18.920 +says, I feel that AI + +00:54:18.920 --> 00:54:23.560 +but before it was KPIs or else are + +00:54:23.640 --> 00:54:28.120 +are searched by managers +to justify their decisions or run away + +00:54:28.440 --> 00:54:30.920 +from the responsibility +of their decisions. + +00:54:32.480 --> 00:54:34.520 +It follows a need for them + +00:54:34.800 --> 00:54:37.360 +but with a wrong incomplete answer. + +00:54:37.960 --> 00:54:42.200 +Do you agree? Yes. + +00:54:42.440 --> 00:54:46.080 +I mean, I think that the issue + +00:54:46.080 --> 00:54:50.080 +and I was trying to make that point, +but possibly not well enough + +00:54:50.960 --> 00:54:54.560 +that AI is doing much of what + +00:54:54.560 --> 00:54:57.920 +we've done before, but + +00:54:57.920 --> 00:55:00.200 +it's amplifying, accelerating + +00:55:00.200 --> 00:55:03.240 +and automating those things. + +00:55:03.560 --> 00:55:06.320 +And certainly AI can be used to + +00:55:06.400 --> 00:55:10.640 +for confirmation bias to find the specific + +00:55:11.760 --> 00:55:14.280 +justification for what it is + +00:55:14.280 --> 00:55:18.400 +that we need to justify whether it's +something good or something bad. 
+ +00:55:18.400 --> 00:55:22.680 +So the a lot of the the harms of AI + +00:55:22.720 --> 00:55:25.040 +already existed + +00:55:26.360 --> 00:55:29.240 +because of course AI is learning + +00:55:29.240 --> 00:55:31.720 +our past practices and our data, + +00:55:32.520 --> 00:55:35.240 +but because it + +00:55:35.520 --> 00:55:38.720 +I guess I've often used the analogy +of a power tool + +00:55:39.200 --> 00:55:41.280 +before it was this + +00:55:42.360 --> 00:55:47.240 +practice +that was not that we did manually. + +00:55:47.240 --> 00:55:51.440 +And so there was an opportunity +to make exceptions, + +00:55:51.440 --> 00:55:55.960 +to reconsider, you know, +is this actually what we're we're doing? + +00:55:56.360 --> 00:55:58.640 +And to, to do something different? + +00:55:58.640 --> 00:56:01.720 +But with the with a power tool, it's + +00:56:02.480 --> 00:56:06.920 +it becomes this much more impactful thing. + +00:56:07.400 --> 00:56:10.680 +And there's less opportunity to + +00:56:10.720 --> 00:56:15.560 +to craft the approach that we take. + +00:56:15.560 --> 00:56:18.680 +Yeah, I think that's +why it's really important to + +00:56:19.520 --> 00:56:21.680 +to try to design for outliers + +00:56:21.840 --> 00:56:24.800 +and consider outliers in in and again + +00:56:24.800 --> 00:56:27.480 +I come back to this point of a system + +00:56:27.800 --> 00:56:32.360 +that the system +as a whole that includes AI if we can't + +00:56:34.040 --> 00:56:35.440 +guarantee that the AI + +00:56:35.440 --> 00:56:38.440 +itself is going to give us the + +00:56:38.440 --> 00:56:42.320 +characteristics that we want, +then we need to design around that + +00:56:42.800 --> 00:56:45.680 +and and be mindful of that + +00:56:45.680 --> 00:56:51.040 +while we're designing. + +00:56:51.040 --> 00:56:57.600 +There's also, of course, the opportunity +to try to clean up our data in general + +00:56:58.000 --> 00:57:01.960 +is there are in situations +where we can identify + +00:57:03.160 --> 00:57:05.640 +problems with the data, +we should certainly tackle + +00:57:05.640 --> 00:57:07.440 +that or imbalances in the data. + +00:57:07.440 --> 00:57:08.720 +We should certainly tackle that. + +00:57:08.720 --> 00:57:11.640 +That's one you know, one other step that + +00:57:11.760 --> 00:57:15.200 +I think there's many steps to fairness and + +00:57:16.160 --> 00:57:19.000 +and to ethical application of AI. + +00:57:19.520 --> 00:57:21.560 +And no one step + +00:57:22.480 --> 00:57:25.680 +is a magic solution to all of that. + +00:57:25.680 --> 00:57:27.440 +But if we + +00:57:27.800 --> 00:57:32.280 +stay aware of the risks, +make sure we're talking + +00:57:32.280 --> 00:57:37.200 +to the right people and involving them, +then I think we can we can at least + +00:57:38.360 --> 00:57:40.440 +mitigate problems and know + +00:57:40.440 --> 00:57:42.800 +the limits of the technologies +that we're using + +00:57:44.680 --> 00:57:47.720 +better. + +00:57:47.720 --> 00:57:51.440 +Yeah, I've been looking at some of +the other questions that have come in + +00:57:51.840 --> 00:57:57.800 +and one of the discussions was about +the Gaussian curve and or the Gaussian center. 
+ +00:57:57.800 --> 00:58:00.080 +And one thing I think that + +00:58:01.120 --> 00:58:03.840 +one point that I may not have made +as clearly + +00:58:03.840 --> 00:58:07.640 +is that in fact the the myth + +00:58:07.640 --> 00:58:11.160 +that we need to have a single answer + +00:58:11.520 --> 00:58:17.000 +at the very middle of the Gaussian curve, +which of course matches our notion + +00:58:17.000 --> 00:58:21.560 +of majority rules +or as the way to decide amongst + +00:58:21.560 --> 00:58:23.680 +difficult decisions, it + +00:58:24.880 --> 00:58:28.200 +an alternative to that is to address + +00:58:28.200 --> 00:58:31.160 +the very, very diverse edges + +00:58:32.280 --> 00:58:34.520 +initially and to prioritize those. + +00:58:34.520 --> 00:58:39.560 +Because what what then happens is +it gives us room + +00:58:40.560 --> 00:58:43.240 +for to change. + +00:58:43.640 --> 00:58:45.800 +It helps us address the uncertainty. + +00:58:45.800 --> 00:58:49.040 +It makes the whole design or + +00:58:49.280 --> 00:58:52.480 +decision or options that are available + +00:58:54.440 --> 00:58:56.000 +much more + +00:58:56.000 --> 00:58:59.480 +generous and therefore prepares us better + +00:58:59.480 --> 00:59:03.680 +for the vulnerabilities that we're going +to experience in the future. + +00:59:03.680 --> 00:59:04.840 +So that, + +00:59:05.520 --> 00:59:08.440 +of course I'm an academic, and to say that + +00:59:08.800 --> 00:59:11.640 +that statistical reasoning and evidence + +00:59:11.960 --> 00:59:14.760 +through scientific methods is at + +00:59:14.760 --> 00:59:19.320 +fault is is a fairly dangerous thing +to say, especially during a time + +00:59:19.720 --> 00:59:23.440 +when truth is so much under attack and + +00:59:24.000 --> 00:59:29.000 +but I think what we need to do is +not reduce truth to statistical reasoning, + +00:59:29.680 --> 00:59:32.120 +but to acknowledge that there are + +00:59:32.840 --> 00:59:37.880 +a variety of perspectives, untruth, +and that we need to come up with one + +00:59:37.880 --> 00:59:43.040 +that that addresses +the people that we're currently excluding + +00:59:43.040 --> 00:59:58.400 +in our notions of truth. + +00:59:58.400 --> 01:00:01.480 +Yeah, So there's 2 minutes left. + +01:00:01.720 --> 01:00:05.080 +I, I think now + +01:00:05.880 --> 01:00:07.800 +maybe we could squeeze in one more + +01:00:07.800 --> 01:00:12.640 +question here. + +01:00:12.640 --> 01:00:14.280 +Jen Benjamin + +01:00:15.440 --> 01:00:17.040 +asks, Do we think + +01:00:17.040 --> 01:00:23.120 +that AI and big companies driving +research on it can be problematic towards + +01:00:23.120 --> 01:00:27.240 +societal issues which don't necessarily +give the highest revenue? + +01:00:29.040 --> 01:00:31.440 +And if so, how can it be faced? + +01:00:33.400 --> 01:00:35.920 +Yeah, that's a huge question. + +01:00:36.320 --> 01:00:37.680 +I think even government + +01:00:37.680 --> 01:00:43.400 +efforts are facing in their decision +making on profit + +01:00:43.400 --> 01:00:45.920 +and economic progress and + +01:00:47.800 --> 01:00:50.240 +and impact measures that are + +01:00:51.000 --> 01:00:55.760 +so that I think +one of the things we need to abandon + +01:00:56.160 --> 01:01:00.960 +is this idea +that a solution needs to be formulaic + +01:01:00.960 --> 01:01:04.480 +and that we need to scale it +by a formulaic replication. 
+ +01:01:05.120 --> 01:01:08.000 +And we need to recognize that + +01:01:08.600 --> 01:01:11.480 +that there's a different form of scaling + +01:01:11.920 --> 01:01:14.760 +that is by diversification, + +01:01:15.200 --> 01:01:18.560 +and that we need +to contextually apply things. + +01:01:19.080 --> 01:01:25.680 +And I mean, that's one of the lessons +of Indigenous and indigenous cultures + +01:01:27.200 --> 01:01:29.280 +that the + +01:01:29.280 --> 01:01:34.040 +what, what has been labeled as colonialist +is, is actually what many governments + +01:01:34.040 --> 01:01:40.520 +are still implementing, even in things +like social entrepreneurship. + +01:01:41.000 --> 01:01:44.680 +But yes, big companies, +of course, are driven by profit and + +01:01:46.160 --> 01:01:49.160 +is that the best approach + +01:01:49.600 --> 01:01:52.680 +to achieve the common good? + +01:01:52.680 --> 01:01:54.840 +That's a huge question. + +01:01:54.840 --> 01:01:55.520 +I think. + +01:01:55.520 --> 01:01:59.320 +And I think this would be a great one +to come back to tomorrow, actually. + +01:01:59.320 --> 01:02:00.400 +Yeah, exactly. + +01:02:00.400 --> 01:02:03.200 +I mean, the symposium +let's let's come back to that one + +01:02:03.200 --> 01:02:05.680 +because I see that +we're out of time right now. + +01:02:05.680 --> 01:02:08.080 +But thank you very much. + +01:02:08.320 --> 01:02:09.560 +Thank you. + diff --git a/pages/about/projects/wai-coop/symposium2-captions/machine_learning.vtt b/pages/about/projects/wai-coop/symposium2-captions/machine_learning.vtt new file mode 100644 index 00000000000..8a72fcfbfbc --- /dev/null +++ b/pages/about/projects/wai-coop/symposium2-captions/machine_learning.vtt @@ -0,0 +1,3096 @@ +WEBVTT +Kind: captions +Language: en + +00:00:00.000 --> 00:00:04.000 +So, uh, we have Willian Watanabe + +00:00:04.480 --> 00:00:08.600 +from Universidade Tecnológica +Federal do Paraná, in Brazil. + +00:00:10.000 --> 00:00:14.080 +We have Yeliz Yesilada +from the Middle East + +00:00:14.080 --> 00:00:17.360 +Technical University, uh, Sheng Zhou + +00:00:17.800 --> 00:00:21.440 +from Zhejiang University in China. + +00:00:21.520 --> 00:00:24.080 +I hope I pronounced it correctly. + +00:00:24.080 --> 00:00:27.640 +And Fabio Paternò from CNR + +00:00:28.120 --> 00:00:31.240 +IST in Italy. + +00:00:31.840 --> 00:00:35.240 +Okay, Thank you all for joining us. And + +00:00:36.440 --> 00:00:39.040 +for some of you it’s earlier in the morning. + +00:00:39.280 --> 00:00:41.640 +For others of you, it's later. + +00:00:42.520 --> 00:00:45.520 +Well, for some of you, +I guess it's really late in the evening. + +00:00:46.520 --> 00:00:49.720 +So thank you all for your availability. + +00:00:50.560 --> 00:00:55.200 +And let's start this discussion on how + +00:00:56.200 --> 00:00:56.680 +I would say + +00:00:56.680 --> 00:01:01.600 +current machine learning algorithms +and current machine learning applications + +00:01:02.200 --> 00:01:06.880 +can support +or can improve methodologies for + +00:01:07.960 --> 00:01:10.880 +automatically assessing web accessibility. + +00:01:11.560 --> 00:01:15.640 +And from your previous works, + +00:01:16.040 --> 00:01:20.440 +you’ve touched different +aspects of how this can be done. 
+ +00:01:20.880 --> 00:01:24.280 +So machine learning has been used + +00:01:25.080 --> 00:01:28.360 +to support web accessibility evaluation + +00:01:29.400 --> 00:01:34.680 +through different aspects +such as sampling, such as metrics, + +00:01:34.680 --> 00:01:39.480 +such as evaluation predictions, +such as handling dynamic pages. + +00:01:40.240 --> 00:01:42.520 +And so and I understand that + +00:01:42.520 --> 00:01:46.280 +these are domains, +not all of these domains + +00:01:46.600 --> 00:01:50.400 +you have work done on those, +but some of you have worked on + +00:01:50.640 --> 00:01:52.080 +specific domains. + +00:01:52.080 --> 00:01:55.640 +And so I would like you to focus on +the ones that you've been + +00:01:56.360 --> 00:01:58.360 +working more closely. + +00:01:58.360 --> 00:02:00.760 +And just for us to start, + +00:02:01.520 --> 00:02:04.960 +just let us know +what are the current challenges + +00:02:05.440 --> 00:02:08.760 +that prevent further development +and prevent further + +00:02:08.760 --> 00:02:11.720 +use of machine learning or other A.I. + +00:02:11.760 --> 00:02:14.560 +techniques in this specific domains? + +00:02:14.920 --> 00:02:15.400 +Okay. + +00:02:15.520 --> 00:02:22.960 +And I can start with you, Willian. + +00:02:22.960 --> 00:02:27.320 +First of all, thank you very much for... +for everything that is being organized, + +00:02:28.480 --> 00:02:29.160 +it’s great to be here. + +00:02:29.160 --> 00:02:33.760 +... Europe +and this to give some context + +00:02:34.160 --> 00:02:37.000 +and I'm Willian I'm +a professor here in Brazil. + +00:02:37.000 --> 00:02:39.280 +I work in accessibility, + +00:02:39.280 --> 00:02:42.040 +my my focus, my research + +00:02:42.040 --> 00:02:46.440 +focuses on web technologies, +the ARIA specification + +00:02:46.440 --> 00:02:48.560 +more specific and + +00:02:50.920 --> 00:02:54.760 +just in regards to everything +that has been said in the question + +00:02:54.760 --> 00:02:59.520 +by Carlos Duarte, +my focus is on evaluation prediction + +00:03:00.040 --> 00:03:02.120 +according to the ARIA specification + +00:03:02.560 --> 00:03:05.680 +and I believe the main... + +00:03:06.280 --> 00:03:08.920 +I was invited to this... + +00:03:10.480 --> 00:03:12.000 +to this panel + +00:03:12.200 --> 00:03:16.320 +considering my research on identification +of valences in web application. + +00:03:16.320 --> 00:03:19.200 +So the problem that I address is + +00:03:19.360 --> 00:03:22.240 +associated to identifying + +00:03:22.240 --> 00:03:22.960 +components + +00:03:22.960 --> 00:03:26.560 +In web applications. When we implement +web applications, we use semi-structured + +00:03:26.760 --> 00:03:29.680 +languages such as HTML. + +00:03:29.680 --> 00:03:32.680 +My job is to identify what + +00:03:32.680 --> 00:03:36.600 +these elements +in the HTML structure represent + +00:03:37.720 --> 00:03:39.880 +in the web page, like they can represent +some + +00:03:39.880 --> 00:03:42.240 +widgets, some specific type of widgets. + +00:03:42.800 --> 00:03:43.840 +There's some components. + +00:03:43.840 --> 00:03:45.760 +There are some landmarks that we need + +00:03:45.760 --> 00:03:47.520 +to identify in the web page. + +00:03:47.520 --> 00:03:49.720 +And this is basically what I do. + +00:03:49.720 --> 00:03:53.640 +So what I have been doing +for the last year, + +00:03:53.800 --> 00:03:58.480 +I have been using machine learning +for identifying these elements. 
+
+00:03:58.480 --> 00:04:02.560
+I use supervised learning and I use data
+
+00:04:02.560 --> 00:04:07.080
+provided by the DOM structure
+of the web application.
+
+00:04:07.080 --> 00:04:11.240
+So I search for elements in the web page
+and classify them as an element,
+
+00:04:11.560 --> 00:04:14.520
+widgets or anything else.
+
+00:04:14.520 --> 00:04:18.720
+The challenges in regards to that.
+
+00:04:18.720 --> 00:04:19.360
+They are
+
+00:04:20.120 --> 00:04:22.240
+are kind of different from the challenges
+
+00:04:22.240 --> 00:04:26.360
+that have been addressed yesterday.
+Yesterday...
+
+00:04:26.360 --> 00:04:29.840
+Yesterday... applications of machine
+learning.
+
+00:04:29.880 --> 00:04:35.240
+I think they work with video in texts
+that are unstructured data.
+
+00:04:35.320 --> 00:04:36.320
+So they are
+
+00:04:37.480 --> 00:04:39.160
+more complicated, I would say.
+
+00:04:39.160 --> 00:04:43.360
+And my... the main challenge
+that I that I address in my research
+
+00:04:43.360 --> 00:04:46.560
+is associated with data acquisition
+and data extraction
+
+00:04:47.120 --> 00:04:49.600
+where I identify
+what kind of features that I
+
+00:04:50.480 --> 00:04:53.680
+I should use to identify these components
+in web applications
+
+00:04:54.480 --> 00:04:57.920
+Associated with that I think they are
+and to summarize,
+
+00:04:58.360 --> 00:05:01.480
+my problems are associated
+with the diversity of web applications.
+
+00:05:01.480 --> 00:05:04.280
+There are different domains and
+
+00:05:06.400 --> 00:05:07.760
+this kind of bias
+
+00:05:07.760 --> 00:05:10.800
+and any dataset that we use,
+it's difficult.
+
+00:05:10.800 --> 00:05:13.680
+For me. For instance,
+to identify,
+
+00:05:13.680 --> 00:05:16.400
+a number of websites that implement
+
+00:05:16.400 --> 00:05:19.000
+that represents all the themes of websites
+
+00:05:19.000 --> 00:05:22.360
+that can be used, in web applications
+
+00:05:22.360 --> 00:05:27.120
+variability in the implementation
+of HTML and JavaScript,
+
+00:05:27.120 --> 00:05:30.640
+and the use of automatic tools
+for extracting this data
+
+00:05:31.800 --> 00:05:32.920
+such as
+
+00:05:32.920 --> 00:05:37.720
+WebDriver API, the DOM
+structure dynamics and mutation observers.
+
+00:05:37.720 --> 00:05:41.680
+There are a lot of specifications
+that are currently being developed
+
+00:05:41.680 --> 00:05:45.560
+that I must use, and I always must
+
+00:05:45.560 --> 00:05:47.680
+keep my observing to
+
+00:05:48.640 --> 00:05:51.880
+to see if I can use them
+to improve my research.
+
+00:05:52.960 --> 00:05:57.560
+And lastly, there is always the problem
+of manual classification in...
+
+00:05:57.880 --> 00:06:00.880
+for generating these data sets
+that I can use
+
+00:06:02.720 --> 00:06:03.640
+That’s it, Carlos.
+
+00:06:03.640 --> 00:06:05.000
+Thank you.
+
+00:06:05.000 --> 00:06:06.760
+Thank you Willian.
+
+00:06:06.920 --> 00:06:10.080
+So Yeliz... and thank you
+Willian for introducing yourself
+
+00:06:10.240 --> 00:06:13.000
+because I forgot to ask
+all of you that to do that.
+
+00:06:13.000 --> 00:06:15.040
+So your first intervention, please
+
+00:06:16.120 --> 00:06:20.240
+do give us a brief introduction about
+yourselves and the work you've been doing.
+
+00:06:20.240 --> 00:06:22.560
+And so, Yeliz, I will follow with you.
+
+00:06:23.560 --> 00:06:24.960
+Hi. Hello, everybody.
+
+00:06:24.960 --> 00:06:25.840
+Good afternoon.
+
+00:06:25.840 --> 00:06:26.960
+Afternoon for me.
+ +00:06:26.960 --> 00:06:29.680 +So good afternoon, everybody. + +00:06:29.680 --> 00:06:31.000 +I'm Yeliz. + +00:06:31.000 --> 00:06:34.680 +I'm an associate professor at Middle East +Technical University + +00:06:34.720 --> 00:06:36.840 +Northern Cyprus Campus. + +00:06:36.840 --> 00:06:41.360 +I've been doing web accessibility +research for more than 20 years now. + +00:06:41.960 --> 00:06:47.920 +Time goes really fast and recently +I've been exploring machine learning + +00:06:47.920 --> 00:06:52.440 +and AI specifically for web accessibility. + +00:06:52.440 --> 00:06:55.440 +Supporting web accessibility +from different dimensions. + +00:06:56.720 --> 00:06:57.520 +Regarding the + +00:06:57.520 --> 00:07:00.800 +challenges, I think there are +of course many challenges. + +00:07:00.800 --> 00:07:05.920 +But as Willian mentioned, +I can actually say that + +00:07:05.920 --> 00:07:10.600 +kind of the biggest challenge for +my work has been data collection. + +00:07:11.760 --> 00:07:13.760 +So I can actually + +00:07:14.880 --> 00:07:17.680 +say that data, of course, is critical. + +00:07:17.680 --> 00:07:21.000 +As it was discussed yesterday +in the other panels, + +00:07:21.960 --> 00:07:25.360 +Data is very critical +for machine learning approaches + +00:07:25.760 --> 00:07:28.600 +and for us collecting data, + +00:07:29.120 --> 00:07:34.120 +making sure that the data is representing +our user groups, different user groups, + +00:07:34.520 --> 00:07:37.080 +and not biasing any user groups. + +00:07:38.000 --> 00:07:40.240 +And also, of course, preparing + +00:07:40.240 --> 00:07:42.920 +and labeling the data as certain + +00:07:43.440 --> 00:07:47.560 +machine learning algorithms, of course, +supervised ones they require labeling + +00:07:47.920 --> 00:07:51.120 +and labeling +has also been a challenge for us + +00:07:51.120 --> 00:07:56.320 +because sometimes a certain task it's +not so straightforward to do the labeling. + +00:07:56.320 --> 00:07:58.320 +It's not black and white. + +00:07:58.320 --> 00:08:01.440 +So it's been a challenge for us, +I think in that sense. + +00:08:01.880 --> 00:08:05.880 +And other two challenges I can mention is + +00:08:05.880 --> 00:08:09.280 +I think the second one +is the complexity of the domain. + +00:08:10.160 --> 00:08:14.280 +When you think about the web +accessibility, sometimes people think, Oh, + +00:08:14.320 --> 00:08:18.560 +it's quite straightforward, +but it's actually a very complex domain. + +00:08:19.240 --> 00:08:23.680 +There are many different user +groups, different user requirements, + +00:08:24.680 --> 00:08:26.640 +so understanding those + +00:08:26.640 --> 00:08:29.760 +and making sure that you actually address + +00:08:29.760 --> 00:08:32.840 +different users and different +requirements, it's quite challenging. + +00:08:33.400 --> 00:08:38.080 +And since we also are working, +this is the last one + +00:08:38.080 --> 00:08:42.760 +that I wanted to mention, +since we are also working with web pages. + +00:08:42.760 --> 00:08:48.920 +They are complex, they are not +well designed or well properly coded. + +00:08:48.920 --> 00:08:54.920 +As we always say, browsers are tolerating, +but for developing algorithms, machine + +00:08:54.920 --> 00:08:56.960 +learning algorithms, +they also have to deal + +00:08:56.960 --> 00:09:00.760 +with those complexities, +which makes the task quite complex. + +00:09:00.760 --> 00:09:01.200 +I think. 
+ +00:09:01.200 --> 00:09:04.640 +So just to wrap up, I think in my work + +00:09:05.600 --> 00:09:07.840 +there are three major challenges + +00:09:07.840 --> 00:09:10.720 +data or the lack and quality of data. + +00:09:11.120 --> 00:09:14.560 +Complexity of the domain, +different users, different user + +00:09:14.560 --> 00:09:20.040 +requirements and the complexity +of the resources we are using. + +00:09:20.040 --> 00:09:24.640 +So web pages, +the source code and the complexity of + +00:09:26.080 --> 00:09:27.120 +pages that are not + +00:09:27.120 --> 00:09:31.040 +conforming to standards, +I think they are really posing + +00:09:31.040 --> 00:09:34.400 +a lot of challenges +to algorithms that we are developing. + +00:09:35.200 --> 00:09:37.600 +So these are all I wanted to say. + +00:09:38.400 --> 00:09:40.400 +Okay, Thank you, Yeliz. + +00:09:40.400 --> 00:09:41.600 +Very good + +00:09:42.400 --> 00:09:44.840 +summary of major challenges + +00:09:44.840 --> 00:09:47.160 +facing +everyone that works in this in this field. + +00:09:48.160 --> 00:09:49.560 +So thank you for that. + +00:09:49.560 --> 00:09:50.080 +Sheng... + +00:09:50.080 --> 00:09:53.080 +I want to go with you next. Okay. + +00:09:53.600 --> 00:09:54.760 +Thank you, Carlos. + +00:09:54.760 --> 00:09:55.320 +Hello everyone. + +00:09:55.320 --> 00:09:58.000 +I'm Shen Zhou from Zhejiang University China + +00:09:59.200 --> 00:10:00.400 +From my opinion view + +00:10:00.400 --> 00:10:04.680 +I have three I think three challenges +of course currently. Now. + +00:10:05.600 --> 00:10:08.920 +First, +I totally agree with the idea that it is + +00:10:09.400 --> 00:10:11.920 +hard to acquire labels +for more training. + +00:10:12.520 --> 00:10:13.680 +Since the success of machine + +00:10:13.680 --> 00:10:16.680 +learning heavily +relies on a large number of labeled data, + +00:10:17.920 --> 00:10:21.800 +however, accessing this data labels usually +costs lots of time, + +00:10:22.040 --> 00:10:26.240 +which may be hard to realize, +especially in the accessibility domain. + +00:10:27.360 --> 00:10:29.160 +I want to take the... + +00:10:29.160 --> 00:10:33.160 +take the W4A... + +00:10:33.160 --> 00:10:36.760 +Sorry, +I'm a little bit nervous here, sorry... + +00:10:37.800 --> 00:10:41.080 +I want to take the WCAG rule that's + +00:10:41.320 --> 00:10:44.760 +we will want to take an image with text +as an example. + +00:10:45.400 --> 00:10:48.520 +As we discussed in the panel yesterday, + +00:10:48.760 --> 00:10:54.080 +most of the current image captioning or +OCR methods are trained on existing assets + +00:10:54.640 --> 00:10:59.680 +rather than the image like logo +that is essential in text alternative + +00:11:00.280 --> 00:11:02.840 +The label for web accessibility evaluation + +00:11:02.840 --> 00:11:06.400 +should fully consider +the experience of different population. + +00:11:06.680 --> 00:11:10.360 +There are very few datasets +that are specifically designed + +00:11:10.360 --> 00:11:15.480 +for the accessibility evaluation +task and satisfies above requirements. + +00:11:15.880 --> 00:11:20.240 +So the machine learning model is that +traditional datasets cannot be + +00:11:20.240 --> 00:11:23.160 +well generalized +to accessibility evaluation. 
+ +00:11:24.720 --> 00:11:28.200 +Second, +I think is about the web page sampling, + +00:11:28.200 --> 00:11:32.560 +since I have done +a little bit of work on this, I think + +00:11:32.560 --> 00:11:37.040 +currently there are multiple factors +that's affecting the sampling strategy. + +00:11:37.560 --> 00:11:38.960 +First, sampling + +00:11:38.960 --> 00:11:42.880 +has been a fundamental technique in +web accessibility evaluation + +00:11:42.960 --> 00:11:47.440 +when dealing with millions of pages. +The previous page sampling + +00:11:47.440 --> 00:11:51.520 +methods are usually based +on the features of each page. + +00:11:51.520 --> 00:11:55.040 +Such as the elements of the DOM tree +structure. + +00:11:55.640 --> 00:12:00.520 +The pages with similar features +assumed to be generated by the same + +00:12:00.960 --> 00:12:05.200 +development framework +and have similar accessibility problems. + +00:12:05.960 --> 00:12:09.600 +However, with the fast growth +of web development framework + +00:12:11.800 --> 00:12:13.960 +pages are developed with diverse tools. + +00:12:14.560 --> 00:12:17.320 +For example, pages that looks very + +00:12:17.320 --> 00:12:22.480 +similar may be developed by totally +different framework and some pages + +00:12:22.480 --> 00:12:26.040 +that look totally different +may be developed by the same framework. + +00:12:26.840 --> 00:12:31.160 +This poses great challenges for feature +based Web Accessibility evaluation. + +00:12:31.160 --> 00:12:34.720 +It is necessary +to incorporate more factors + +00:12:34.720 --> 00:12:38.760 +into the sampling process, +such as the connection topology + +00:12:38.760 --> 00:12:42.720 +among pages +and a visual similarity and typesetting. + +00:12:43.160 --> 00:12:47.640 +So how to identify the similarity +between pages considering + +00:12:47.760 --> 00:12:51.400 +multiple factors into a unified +sampling probability + +00:12:51.400 --> 00:12:54.400 +is critical for sampling. + +00:12:54.400 --> 00:12:58.800 +I think this could be a problem +that's related to the graph topology + +00:12:58.960 --> 00:13:00.000 +content understanding + +00:13:00.000 --> 00:13:03.080 +and metrical learning, +which is a comprehensive research program. + +00:13:04.200 --> 00:13:06.200 +So the third + +00:13:06.240 --> 00:13:10.400 +challenge +I think is the subjective evaluation rules. + +00:13:11.360 --> 00:13:14.200 +When we evaluate the web accessibility, + +00:13:14.360 --> 00:13:17.920 +there are both subjective +and objective rules, right? + +00:13:18.160 --> 00:13:21.920 +So for example, +when evaluating the WCAG success + +00:13:21.920 --> 00:13:25.120 +criterion, 1.4.5 images of text. + +00:13:25.640 --> 00:13:29.360 +The image is expected to be +associated with accurate + +00:13:29.360 --> 00:13:34.200 +description of text which has been +discussed in the panel yesterday. + +00:13:34.800 --> 00:13:38.320 +It is still challenging to verify +the matching + +00:13:38.520 --> 00:13:47.520 +between the... + +00:13:47.520 --> 00:13:47.920 +Yeah. + +00:13:49.760 --> 00:13:52.120 +I guess, uh, + +00:13:52.120 --> 00:13:56.800 +there are some connection issues. + +00:13:56.800 --> 00:13:59.800 +Let's see. Okay. + +00:14:00.440 --> 00:14:03.960 +He has dropped so. + +00:14:03.960 --> 00:14:05.560 +So uh, + +00:14:06.920 --> 00:14:09.280 +we’ll let Sheng... ok, he is coming back so + +00:14:13.280 --> 00:14:16.240 +you're muted. + +00:14:16.240 --> 00:14:19.080 +Oh, okay. All right. Okay. All right. + +00:14:19.880 --> 00:14:20.680 +So can you. 
+
+00:14:20.680 --> 00:14:23.640
+Can you continue?
+
+00:14:23.640 --> 00:14:25.120
+Okay. I'm so sorry.
+
+00:14:25.120 --> 00:14:28.320
+Uh, okay. Okay.
+
+00:14:28.320 --> 00:14:31.120
+I think there are three challenges.
+
+00:14:31.120 --> 00:14:33.280
+And the first challenge is
+
+00:14:34.600 --> 00:14:37.040
+same as Yeliz just described it.
+
+00:14:37.040 --> 00:14:38.800
+That's we. It is harder to
+
+00:14:41.080 --> 00:14:42.400
+we. You
+
+00:14:42.400 --> 00:14:45.440
+dropped when you were starting to talk
+about the third challenge.
+
+00:14:46.280 --> 00:14:46.760
+Okay.
+
+00:14:46.760 --> 00:14:49.720
+Okay, So we still got the first and second
+challenge.
+
+00:14:49.720 --> 00:14:51.880
+We, we heard that loud and clear.
+
+00:14:51.880 --> 00:14:55.120
+So now you can resume on the third
+challenge.
+
+00:14:55.880 --> 00:14:57.240
+Okay? Okay. Okay.
+
+00:14:57.240 --> 00:15:02.520
+So the first challenge is, I think
+is the subjective evaluation rules.
+
+00:15:03.040 --> 00:15:06.480
+This when evaluating
+the web accessibility
+
+00:15:06.480 --> 00:15:10.760
+there are both subjective
+and objective rules.
+
+00:15:10.760 --> 00:15:14.880
+For example,
+when evaluating the WCAG success criteria,
+
+00:15:15.120 --> 00:15:18.280
+1.4.5 Images of text.
+
+00:15:18.640 --> 00:15:22.960
+The image is expected to be
+associated with accurate
+
+00:15:23.080 --> 00:15:27.280
+description text
+as discussed in the panel yesterday.
+
+00:15:27.320 --> 00:15:31.600
+It is still challenging to verify
+whether the matching between image
+
+00:15:31.720 --> 00:15:36.320
+with text, since we do not have access
+to the ground truth of the
+
+00:15:36.760 --> 00:15:38.680
+text of the image. So at
+
+00:15:47.160 --> 00:15:49.320
+okay apparently
+
+00:15:50.560 --> 00:15:51.920
+we lost.
+
+00:15:52.000 --> 00:15:58.360
+Sheng again.
+
+00:15:58.360 --> 00:16:02.520
+So let's just give him 10 seconds
+and see if he reconnects.
+
+00:16:02.520 --> 00:16:05.920
+Otherwise we will move on to Fabio.
+
+00:16:11.840 --> 00:16:12.880
+okay, so perhaps it's
+
+00:16:12.880 --> 00:16:15.800
+better to to move on to Fabio and and
+
+00:16:16.920 --> 00:16:19.440
+get the perspective of someone
+
+00:16:20.200 --> 00:16:25.240
+who is making an automated accessibility
+evaluation tool available.
+
+00:16:25.240 --> 00:16:28.120
+So it's certainly going to be interesting,
+so Fabio.
+
+00:16:28.120 --> 00:16:30.200
+Can you can take it from here?
+
+00:16:30.760 --> 00:16:32.320
+Yeah, yeah, yeah.
+
+00:16:32.320 --> 00:16:33.800
+So, I’m Fabio, I’m a
+
+00:16:33.800 --> 00:16:37.600
+Research director
+at the Italian National Research Council,
+
+00:16:37.600 --> 00:16:42.280
+where I lead the laboratory on human interfaces
+and information systems, and we have
+
+00:16:42.280 --> 00:16:47.800
+now a project funded
+by the National recovery and resilience
+
+00:16:47.800 --> 00:16:51.160
+plan,
+which is about monitoring the
+
+00:16:52.240 --> 00:16:56.040
+accessibility
+of the public administration websites.
+
+00:16:56.800 --> 00:17:00.000
+And so, I mean, in this project
+we have our tool MAUVE++,
+
+00:17:00.800 --> 00:17:04.920
+which is a tool open,
+freely available
+
+00:17:05.440 --> 00:17:09.680
+and it has already more than 3000
+registered users
+
+00:17:10.000 --> 00:17:15.080
+and we recently performed
+an accessibility evaluation of
+
+00:17:15.120 --> 00:17:20.280
+10,000 websites considering
+200 pages for each website.
+ +00:17:20.280 --> 00:17:25.000 +So it’s really large scale... + +00:17:25.000 --> 00:17:29.120 +So we were very interested +in understanding how machine learning + +00:17:30.480 --> 00:17:31.560 +can help us + +00:17:31.560 --> 00:17:36.520 +in these, you know, large scale +monitoring work. So I mean, for this purpose... + +00:17:37.120 --> 00:17:40.000 +I’m more research... +so before this panel + +00:17:40.040 --> 00:17:43.240 +I did a small, you know, +systematic literature + +00:17:43.240 --> 00:17:43.840 +review + +00:17:43.840 --> 00:17:49.440 +So I went to the ACM digital library, +I entered machine learning and accessibility evaluation + +00:17:49.440 --> 00:17:51.960 +just curious to see +what has been done so far. + +00:17:52.600 --> 00:17:55.920 +So I got only 43 results +which are not too many, I mean + +00:17:56.560 --> 00:18:01.160 +I would have expected more. +Then I looked through all these papers and actually + +00:18:01.400 --> 00:18:05.280 +in the end, only 18 actually applied, +because other papers were more + +00:18:05.280 --> 00:18:08.360 +about, ok, machine learning can +be interesting in future work, and so on. + +00:18:08.360 --> 00:18:12.680 +I mean, so they say that the +specific research efforts + +00:18:12.720 --> 00:18:15.680 +have been so far limited + +00:18:15.880 --> 00:18:20.160 +in this area, and another characteristic +was that they were rather varied + +00:18:20.160 --> 00:18:22.240 +in terms of the topic that they address. + +00:18:22.240 --> 00:18:26.920 +So there are people who try to predict the website +accessibility based on the accessibility of some pages + +00:18:26.920 --> 00:18:31.920 +others try to check the meaningfulness +of alternative descriptions + +00:18:31.920 --> 00:18:36.880 +others classify user interface +content elements. + +00:18:36.920 --> 00:18:41.800 +So I would say that +one challenge at this point is + +00:18:43.840 --> 00:18:44.680 +well, machine + +00:18:44.680 --> 00:18:48.120 +learning can give some, you know, +useful complementary + +00:18:48.520 --> 00:18:51.080 +support to the automatic tools + +00:18:51.240 --> 00:18:54.200 +that we already have + +00:18:54.440 --> 00:18:57.600 +as there are many... in theory +there are more opportunities. + +00:18:57.600 --> 00:19:02.920 +But then in practice +there are a lot of problems. + +00:19:02.920 --> 00:19:07.600 +Another challenge... identifying the relevant +datasets and what are the features + +00:19:07.600 --> 00:19:10.120 +that are really able to characterize the + +00:19:10.800 --> 00:19:13.720 +type of aspects that we want to investigate. + +00:19:14.360 --> 00:19:16.720 +And I would say the third and + +00:19:17.320 --> 00:19:22.200 +last main general challenge +is that we really + +00:19:22.720 --> 00:19:26.240 +work with these computers who change. +In the web + +00:19:26.240 --> 00:19:30.320 +this means that how people +implement, how people use + +00:19:30.840 --> 00:19:32.720 +the application is +continuously changing. + +00:19:32.720 --> 00:19:33.720 +So there is also + +00:19:33.720 --> 00:19:36.240 +the risk that +the dataset becomes soon + +00:19:37.000 --> 00:19:40.480 +obsolete, not sufficiently updated + +00:19:40.560 --> 00:19:46.720 +for addressing all the emerging +needs that can occur. + +00:19:46.720 --> 00:19:47.560 +Okay. + +00:19:47.560 --> 00:19:50.560 +Thank you for that perspective and Sheng + +00:19:52.080 --> 00:19:54.120 +I want to give you now the opportunity + +00:19:54.120 --> 00:19:56.760 +to finish up your intervention. 
+ +00:19:57.960 --> 00:19:59.320 +Okay. + +00:19:59.320 --> 00:20:02.880 +Thank thank you, Carlos, and sorry +for the lagging here + +00:20:03.880 --> 00:20:07.000 +so and so I will continue my + +00:20:07.200 --> 00:20:11.640 +third opening of the challenge. +From my opinion + +00:20:11.640 --> 00:20:15.520 +the third challenge is the +subjective evaluation rules. + +00:20:15.960 --> 00:20:18.720 +This one, evaluating web accessibility + +00:20:18.720 --> 00:20:23.200 +there are both subjective +and objective rules and one, + +00:20:24.200 --> 00:20:28.760 +for example, +when evaluating the image to text rule, + +00:20:28.960 --> 00:20:33.960 +the image is expected to be associated +with accurate description texts + +00:20:34.480 --> 00:20:38.320 +and and +and as discussed in the panel yesterday, + +00:20:38.320 --> 00:20:42.320 +it is still challenging +to verify the matching between the image + +00:20:42.520 --> 00:20:45.600 +and the the text +since there are no ground truth. + +00:20:46.280 --> 00:20:49.640 +What kind of text +should describe this image? + +00:20:50.200 --> 00:20:54.560 +So as a result, the accessibility +evaluation system is harder to justify + +00:20:54.720 --> 00:20:58.840 +whether the alternate text +really matches the image. + +00:20:59.280 --> 00:21:03.040 +So, thanks. + +00:21:03.040 --> 00:21:04.000 +Okay. Thank you. + +00:21:04.000 --> 00:21:08.760 +And I'll take it from what I guess +most of you. + +00:21:08.760 --> 00:21:14.120 +Well, all of you have in one way +or another mentioned one aspect of + +00:21:15.200 --> 00:21:17.080 +web accessibility evaluation, + +00:21:17.080 --> 00:21:19.600 +which is conformance to + +00:21:20.840 --> 00:21:22.680 +the requirements to guidelines. + +00:21:22.680 --> 00:21:27.280 +You, several of you mentioned the web +content accessibility guidelines + +00:21:27.880 --> 00:21:30.400 +in one way or another, and + +00:21:33.000 --> 00:21:36.040 +checking what we do currently. + +00:21:36.040 --> 00:21:39.400 +So far it's and following up on +what Sheng + +00:21:39.600 --> 00:21:42.760 +was just mentioning, are objective rules. + +00:21:42.760 --> 00:21:46.360 +So that's what we can do so far, right? + +00:21:46.360 --> 00:21:51.480 +Then when we start thinking about +and because the guidelines are themselves + +00:21:51.800 --> 00:21:55.080 +also subject to subjectivity +and fortunately + +00:21:57.040 --> 00:21:59.040 +at the + +00:21:59.720 --> 00:22:02.320 +how can we try + +00:22:02.320 --> 00:22:06.440 +to automate the access, the evaluation + +00:22:06.440 --> 00:22:09.960 +of those more subjective guidelines +or more subjective rules? + +00:22:10.240 --> 00:22:13.840 +And how do you all think +that artificial intelligence + +00:22:13.840 --> 00:22:16.920 +algorithms or machine learning +based approaches + +00:22:17.680 --> 00:22:20.560 +can help us to assess conformance + +00:22:20.560 --> 00:22:24.280 +to those technical requirements +to to accessibility guidelines? + +00:22:25.240 --> 00:22:27.360 +And I'll start with you now, Yeliz. + +00:22:31.640 --> 00:22:32.560 +And thank you. + +00:22:32.560 --> 00:22:33.520 +Carlos. + +00:22:33.520 --> 00:22:38.440 +So regarding the conformance testing, + +00:22:38.440 --> 00:22:43.840 +so maybe we can actually think of this +as two kinds of problems. + +00:22:44.200 --> 00:22:49.080 +The one is the testing, the other one +is confirming basically repairing + +00:22:50.080 --> 00:22:53.800 +or automatically fixing the problems. 
+ +00:22:54.040 --> 00:22:56.200 +So I see actually that + +00:22:56.920 --> 00:23:00.480 +machine learning and AI in general + +00:23:00.480 --> 00:23:04.080 +I think can help in both sides, +in both parties. + +00:23:04.520 --> 00:23:06.840 +So regarding the testing and auditing, +if we take, for example, + +00:23:06.840 --> 00:23:09.200 +So regarding the testing and auditing, +if we take, for example, + +00:23:09.200 --> 00:23:14.200 +WCAG evaluation methodology +as the most systematic methodology + +00:23:14.200 --> 00:23:16.400 +to evaluate for accessibility, + +00:23:17.560 --> 00:23:22.000 +it includes, for example, five stages, +five steps. + +00:23:22.400 --> 00:23:24.880 +So I think + +00:23:24.880 --> 00:23:28.400 +machine learning +can actually help us in certain steps. + +00:23:28.400 --> 00:23:31.560 +For example, it can help us to choose + +00:23:31.840 --> 00:23:36.080 +a representative sample, +which is the third step in WCAG-EM. + +00:23:36.800 --> 00:23:41.400 +We are currently doing some work on that +for example, to explore how to use + +00:23:42.040 --> 00:23:46.040 +unsupervised learning algorithms +to decide, for example, + +00:23:46.320 --> 00:23:50.760 +what is a representative sample +because Fabio, for example, mentioned + +00:23:50.760 --> 00:23:54.040 +the problem of evaluating a large scale + +00:23:54.480 --> 00:23:57.000 +websites with millions of pages. + +00:23:57.280 --> 00:24:01.200 +So how do you decide for example, +which ones to represent? + +00:24:01.200 --> 00:24:03.160 +I mean, which ones to evaluate? + +00:24:03.160 --> 00:24:06.400 +And do they really for example, + +00:24:06.400 --> 00:24:09.440 +if you evaluate some of them, + +00:24:09.440 --> 00:24:13.480 +how much of the sites +you actually cover, for example. + +00:24:13.800 --> 00:24:16.800 +So there I think machine learning +and AI can help. + +00:24:16.800 --> 00:24:19.520 +As I said, +we are currently doing some work on that, + +00:24:20.160 --> 00:24:24.160 +trying to explore machine +learning algorithms for choosing + +00:24:24.160 --> 00:24:28.120 +representative sample, +making sure that the pages that you are + +00:24:28.120 --> 00:24:33.400 +evaluating really represents +the site and reduces the workload. + +00:24:33.400 --> 00:24:38.160 +Because evaluating millions of pages, it's +not an easy task. + +00:24:38.160 --> 00:24:42.600 +So maybe we can pick certain sample pages +and once we evaluate them, + +00:24:42.600 --> 00:24:45.960 +we can transfer the knowledge +from those pages + +00:24:45.960 --> 00:24:49.440 +to the other ones +because more or less pages + +00:24:49.440 --> 00:24:53.720 +these days are developed with templates +or automatically developed. + +00:24:53.720 --> 00:24:59.560 +So maybe we can transfer the errors +we identified + +00:24:59.560 --> 00:25:02.640 +or the ways we are fixing to the others +which are representative. + +00:25:03.520 --> 00:25:06.240 +Regarding the step four in WCAG-EM... + +00:25:06.560 --> 00:25:10.680 +That's actually +about auditing the selected sample. + +00:25:10.680 --> 00:25:13.360 +So how do you evaluate +and test the sample? + +00:25:14.080 --> 00:25:16.480 +I think in that part + +00:25:16.480 --> 00:25:20.240 +as we all know, I mean Sheng mentioned +there are a lot of ... + +00:25:20.520 --> 00:25:24.040 +subjective rules +which they require human testing. 
+ +00:25:24.440 --> 00:25:28.880 +So maybe there we need to explore more + +00:25:29.160 --> 00:25:33.360 +how people, +I mean how humans evaluate the certain + +00:25:34.840 --> 00:25:36.040 +requirements + +00:25:36.040 --> 00:25:39.840 +and how +we can actually automate those processes. + +00:25:39.840 --> 00:25:44.440 +So can we have machine learning algorithms +that learn from how people + +00:25:44.440 --> 00:25:48.120 +evaluate them, assess and implement those. + +00:25:48.400 --> 00:25:53.160 +But of course, as we mentioned +in the first part, data is critical + +00:25:53.400 --> 00:25:57.640 +valid data and quality of data +is very critical for those parts + +00:25:58.040 --> 00:26:02.840 +regarding the repairing +or automatically fixing certain problems. + +00:26:03.160 --> 00:26:07.360 +I still I also think that machine learning +algorithms can help. + +00:26:07.920 --> 00:26:10.720 +For example, regarding + +00:26:10.720 --> 00:26:14.400 +the images Sheng mentioned, +we can automatically test + +00:26:14.400 --> 00:26:18.560 +whether there is an alt text or not, +but not the quality of the alt text. + +00:26:18.880 --> 00:26:23.120 +So maybe there may be +we can explore more and + +00:26:24.440 --> 00:26:26.120 +do more about + +00:26:26.120 --> 00:26:29.440 +understanding +whether it's a good alt text or not + +00:26:29.640 --> 00:26:33.240 +and try to fix it +automatically by learning the + +00:26:34.240 --> 00:26:38.440 +from the context +and other aspects of the site. + +00:26:38.920 --> 00:26:43.920 +Or I've been doing, for example, +research in complex structures + +00:26:43.920 --> 00:26:47.320 +like tables, they are also very difficult +and challenging + +00:26:47.320 --> 00:26:50.200 +for accessibility, for testing +and for repairing. + +00:26:50.880 --> 00:26:54.280 +We've been doing, for example, +research in understanding + +00:26:54.280 --> 00:26:56.080 +whether we can differentiate + +00:26:56.080 --> 00:27:00.120 +and learn to differentiate +a layout table from a data table. + +00:27:00.560 --> 00:27:04.560 +And if it is a complex table, +can we actually, for example, learn + +00:27:04.720 --> 00:27:09.560 +how people are reading that +and guiding the repairing of those? + +00:27:10.440 --> 00:27:13.840 +We can, I guess, also do +similar things with the forms + +00:27:13.840 --> 00:27:17.200 +we can learn +how people are interacting with forms + +00:27:17.200 --> 00:27:22.440 +and try to some complex structures +like forms or rich and dynamic content. + +00:27:22.440 --> 00:27:24.200 +As Willian is working on. + +00:27:24.200 --> 00:27:29.760 +So maybe we can actually do, for example, +more work in there to automatically fix, + +00:27:30.440 --> 00:27:34.840 +which can be encoded in, let's say, +authoring tools or authoring environments + +00:27:34.840 --> 00:27:37.920 +that they include AI +without the developers + +00:27:37.920 --> 00:27:41.400 +noticing that they are actually +using AI to fix the problems. + +00:27:41.760 --> 00:27:44.320 +So just to wrap up, +I know I have a limited time + +00:27:44.600 --> 00:27:50.240 +just to wrap up, so I see that +ML can contribute in two things. + +00:27:50.240 --> 00:27:53.600 +Both testing and +repairing I think can help. + +00:27:55.000 --> 00:27:57.040 +I agree and + +00:27:57.040 --> 00:27:59.440 +some of the you things you mentioned +are really + +00:27:59.840 --> 00:28:03.560 +I guess they can be first steps. 
+
+00:28:03.560 --> 00:28:07.360
+We can assist a human expert,
+
+00:28:07.360 --> 00:28:11.320
+the human evaluator,
+and take away some of the load.
+
+00:28:11.360 --> 00:28:16.320
+And that's also what I, I take from
+from your intervention.
+
+00:28:16.320 --> 00:28:19.000
+So, Fabio,
+I would like your your take on this.
+
+00:28:22.960 --> 00:28:25.360
+I mean, actually
+
+00:28:25.360 --> 00:28:27.960
+I think I agree with what Yeliz said before.
+
+00:28:28.240 --> 00:28:31.960
+So first of all,
+we have to be aware of the complexity
+
+00:28:32.240 --> 00:28:36.360
+of accessibility evaluation
+because we could just think about
+
+00:28:36.360 --> 00:28:40.320
+WCAG 2.1,
+which is composed of 78 success
+
+00:28:40.360 --> 00:28:43.640
+criteria, which are associated
+
+00:28:43.640 --> 00:28:47.000
+with some hundreds of techniques,
+
+00:28:47.000 --> 00:28:51.920
+of specific evaluation techniques.
+This is the kind of statement that it seems like
+
+00:28:53.080 --> 00:28:56.040
+it is going to increase the number
+of techniques... and so on...
+
+00:28:56.040 --> 00:29:01.440
+So the automatic support
+is really fundamental. And let’s say...
+
+00:29:01.520 --> 00:29:05.160
+In general, when you use automatic
+support, the result over the check
+
+00:29:05.200 --> 00:29:08.320
+would be okay, these are a pass...
+No, it fails
+
+00:29:08.680 --> 00:29:09.200
+And the other one is
+cannot tell
+
+00:29:10.680 --> 00:29:12.800
+So one possibility.
+
+00:29:12.800 --> 00:29:18.360
+I think that can be interesting...
+how to exploit machine learning
+
+00:29:18.480 --> 00:29:21.280
+in the situation which...
+you know... the automatic
+
+00:29:22.000 --> 00:29:25.240
+solution is not able to
+deterministically provide
+
+00:29:25.480 --> 00:29:30.800
+okay or fail. I mean, so these could be
+an interesting opportunity
+
+00:29:31.040 --> 00:29:35.320
+which was also explored in the
+WADCHER European project.
+
+00:29:35.320 --> 00:29:38.200
+So, in this case the idea was to allow
+
+00:29:38.320 --> 00:29:40.360
+an accessibility validator
+
+00:29:41.480 --> 00:29:43.480
+human accessibility expert
+
+00:29:43.480 --> 00:29:47.920
+in this case to provide the input
+and then to try to use this input
+
+00:29:48.280 --> 00:29:51.240
+in order to train
+the intelligent system
+
+00:29:52.360 --> 00:29:54.760
+then it was not possible to extend it to
+
+00:29:54.800 --> 00:29:58.120
+to validate these solutions. But,
+
+00:29:58.160 --> 00:30:02.320
+for sure, for example, if I think about...
+it’s really easy automatically to detect
+
+00:30:02.680 --> 00:30:05.040
+whether or not the
+alternative description exists.
+
+00:30:05.480 --> 00:30:08.680
+It is much more difficult
+to say whether it is meaningful.
+
+00:30:09.760 --> 00:30:11.840
+So, in this case, for example,
+
+00:30:11.840 --> 00:30:15.320
+I have seen... also before it’s been
+mentioned... a lot of improvements in
+
+00:30:15.640 --> 00:30:18.000
+AI applied to recognizing
+
+00:30:18.400 --> 00:30:20.920
+in images what the content is.
+ +00:30:21.320 --> 00:30:25.120 +So I have also seen that there's +some attempt in this direction + +00:30:25.120 --> 00:30:28.360 +has been performed, +so we can think of situation in which + +00:30:29.000 --> 00:30:32.480 +the AI take +the image provides the descriptors + +00:30:32.880 --> 00:30:36.960 +and then there is a kind of a similarity +check, between these automatically + +00:30:37.000 --> 00:30:40.840 +generated descriptions, the one +that has been provided by the developer, + +00:30:40.840 --> 00:30:47.480 +and see whether to some extent +is meaningful. These, I think, is something + +00:30:47.600 --> 00:30:54.000 +which is possible. What I’m not sure is +how much we can find a general solution + +00:30:54.040 --> 00:30:57.840 +so, a solution that can always work. +I mean, so, I can see that this kind of AI + +00:30:57.840 --> 00:31:00.960 +probably will be +associated with some level of + +00:31:01.640 --> 00:31:05.080 +confidence and then, I think, +at this point we can also think of + +00:31:06.120 --> 00:31:06.560 +leaving to the + +00:31:06.560 --> 00:31:10.120 +end user decide what should be +the level of confidence + +00:31:10.120 --> 00:31:13.240 +that is acceptable +when, you know, this automatic + +00:31:13.240 --> 00:31:16.200 +support is used to +understand the way that + +00:31:16.840 --> 00:31:19.920 +the description, the alternative +description, is meaningful. + +00:31:19.920 --> 00:31:22.320 +So that would be the direction +where I would + +00:31:22.360 --> 00:31:26.240 +try, I mean, from the perspective +of people who work on tools + +00:31:26.240 --> 00:31:30.120 +for automatic validation +and try to, you know, introduce + +00:31:30.280 --> 00:31:32.760 +AI inside such + +00:31:33.240 --> 00:31:35.960 +automatic frameworks. +But another + +00:31:36.040 --> 00:31:40.400 +key point that we have to be careful +is the transparency. + +00:31:40.440 --> 00:31:42.960 +I mean, when we talk about AI +we often say + +00:31:44.320 --> 00:31:45.160 +about the problem +of the black box. + +00:31:45.160 --> 00:31:49.680 +There is a lot of discussion +about explainable AI. In explainable + +00:31:49.720 --> 00:31:54.520 +AI, usually people try to say “oh the AI is +not able to explain why this element + +00:31:54.880 --> 00:31:59.920 +generated this result” or how can a change +in this element, you know, obtained a different result. + +00:31:59.960 --> 00:32:01.480 +What happens if a change + +00:32:02.480 --> 00:32:03.000 +is handled this way. + +00:32:03.280 --> 00:32:05.520 +So these, let’s say, +questions in XAI + +00:32:06.560 --> 00:32:09.760 +are also the questions +that people encounter + +00:32:09.760 --> 00:32:13.920 +when they have to interact with +an evaluation tool. + +00:32:13.920 --> 00:32:17.760 +And also, there is simply a study +about the transparency of the tool. + +00:32:17.800 --> 00:32:20.960 +So what about these tools +that we have now available. + +00:32:21.160 --> 00:32:21.880 +It was published + +00:32:21.880 --> 00:32:24.920 +in ACM Transactions on +Accessible Computing. + +00:32:25.280 --> 00:32:26.920 +And it turned out that + +00:32:27.280 --> 00:32:32.520 +even without AI, often +these tools are a little bit black boxes. + +00:32:32.520 --> 00:32:34.720 +They’re not sufficiently +transparent, so, + +00:32:34.720 --> 00:32:38.360 +for example, they say, +we support this success criterion + +00:32:38.360 --> 00:32:42.600 +but did not say which technique they +actually apply for the purpose. 
+
+00:32:42.640 --> 00:32:47.160
+How these techniques
+are implemented.
+
+00:32:47.920 --> 00:32:50.880
+So, let’s say, that often users
+are disoriented because
+
+00:32:51.440 --> 00:32:54.000
+they use different tools,
+they get different results,
+
+00:32:54.280 --> 00:32:57.800
+they do not understand
+the reason for such differences.
+
+00:32:58.000 --> 00:33:01.240
+So let's say that this
+point of transparency is already
+
+00:33:01.240 --> 00:33:06.920
+fundamental now, when usually such
+validation tools do not use AI;
+
+00:33:07.960 --> 00:33:08.880
+we have to be careful that
+
+00:33:08.880 --> 00:33:12.720
+if we add AI, it should be added
+in such a way that it is explainable,
+
+00:33:13.240 --> 00:33:17.360
+so that we can help people to better
+understand what happens in the evaluation
+
+00:33:17.360 --> 00:33:22.720
+and not, you know, just giving results
+that we take as
+
+00:33:23.360 --> 00:33:28.040
+right without any sufficient explanation.
+
+00:33:28.040 --> 00:33:30.680
+Yeah,
+I think that's a very important point
+
+00:33:30.680 --> 00:33:34.360
+because if I'm a developer
+and I'm trying to solve
+
+00:33:34.840 --> 00:33:38.680
+accessibility issues, I need to understand
+why there is an error...
+
+00:33:38.720 --> 00:33:41.080
+not just that
+there is an error, over there.
+
+00:33:41.320 --> 00:33:44.960
+So yeah, that's, that's a very important,
+very important point.
+
+00:33:44.960 --> 00:33:45.240
+Thank you, Fabio.
+
+00:33:45.240 --> 00:33:47.680
+So, Sheng, next to you.
+
+00:33:48.960 --> 00:33:50.920
+Okay. Thanks.
+
+00:33:50.920 --> 00:33:53.520
+And considering incorporating
+
+00:33:53.520 --> 00:33:58.040
+artificial intelligence,
+I will try to find some ways to
+
+00:33:58.040 --> 00:33:59.920
+help the developers.
+
+00:33:59.920 --> 00:34:03.480
+So the first one is code
+generation for automatically
+
+00:34:03.480 --> 00:34:08.040
+fixing the accessibility problems.
+As Yeliz just
+
+00:34:08.040 --> 00:34:13.080
+said... web accessibility
+evaluation has been studied, but
+
+00:34:14.320 --> 00:34:15.680
+we have to stand
+
+00:34:15.680 --> 00:34:18.640
+at the point of view of the developers.
+
+00:34:19.440 --> 00:34:22.480
+If the evaluation system
+only identifies or
+
+00:34:22.480 --> 00:34:25.720
+locates the accessibility problem,
+
+00:34:27.000 --> 00:34:30.680
+it may be still hard for developers
+to fix these problems.
+
+00:34:30.680 --> 00:34:34.720
+Since some developers may lack
+experience on this,
+
+00:34:34.720 --> 00:34:38.520
+and recently artificial
+intelligence based code
+
+00:34:38.520 --> 00:34:42.760
+generation has been well
+developed, given some
+
+00:34:43.720 --> 00:34:46.880
+historical code on fixing
+accessibility problems
+
+00:34:47.080 --> 00:34:50.560
+we have tried to train an
+artificial intelligence model
+
+00:34:50.600 --> 00:34:54.080
+to automatically detect
+the problem linked to a code snippet
+
+00:34:54.080 --> 00:34:57.680
+and to provide suggestions
+for the developers.
+
+00:34:57.920 --> 00:35:01.520
+We expect that this function
+could help the developers fix
+
+00:35:01.520 --> 00:35:04.600
+the accessibility problems and improve
+
+00:35:04.640 --> 00:35:07.240
+their websites more efficiently.
+
+00:35:07.800 --> 00:35:10.280
+And the second way to help the developers
+
+00:35:10.280 --> 00:35:13.520
+is about content generation.
+
+00:35:13.520 --> 00:35:17.560
+As discussed in the panel yesterday,
+
+00:35:17.600 --> 00:35:21.960
+there have been several attempts
+at generating text alternatives
+
+00:35:22.240 --> 00:35:26.400
+for images or videos with the help
+of computational NLP
+
+00:35:26.640 --> 00:35:27.160
+techniques.
+
+00:35:28.480 --> 00:35:29.120
+However,
+
+00:35:29.120 --> 00:35:33.160
+it may not be very practical
+for the image providers
+
+00:35:33.720 --> 00:35:38.960
+to provide the text alternatives since the
+state-of-the-art methods usually require
+
+00:35:39.080 --> 00:35:42.400
+large models that are deployed on
+
+00:35:42.400 --> 00:35:44.640
+GPU servers, which is not...
+
+00:35:45.400 --> 00:35:48.840
+which is not convenient
+for frequently updated images.
+
+00:35:49.440 --> 00:35:52.200
+So recently we have been working
+
+00:35:52.200 --> 00:35:57.520
+on some knowledge
+distillation methods, which aim to
+
+00:35:57.760 --> 00:36:02.680
+distill a lightweight model
+from a large model,
+
+00:36:02.920 --> 00:36:07.120
+and we want to develop lightweight
+artificial intelligence models
+
+00:36:07.160 --> 00:36:12.680
+that can be deployed in a... browser
+extension or some lightweight
+
+00:36:12.680 --> 00:36:14.080
+software.
+
+00:36:14.080 --> 00:36:17.800
+We hope to reduce the time cost
+and the computation
+
+00:36:17.880 --> 00:36:22.200
+cost of image providers
+and encourage them to conform to
+
+00:36:22.400 --> 00:36:25.440
+the accessibility technical requirements.
+
+00:36:25.960 --> 00:36:27.560
+Okay. Thank you.
+
+00:36:27.560 --> 00:36:28.080
+Thank you.
+
+00:36:28.080 --> 00:36:31.120
+That's another very relevant point.
+
+00:36:31.200 --> 00:36:35.080
+Make sure that whatever new techniques
+we develop
+
+00:36:35.080 --> 00:36:39.560
+are really accessible
+to those who need to use them.
+
+00:36:39.560 --> 00:36:43.040
+And so the
+computational resources are also
+
+00:36:44.360 --> 00:36:46.480
+a very
+important aspect to take into account.
+
+00:36:47.120 --> 00:36:50.440
+And so, Willian, next, your take on this,
+
+00:36:50.720 --> 00:36:52.000
+please.
+
+00:36:52.000 --> 00:36:53.240
+Okay. Okay.
+
+00:36:54.160 --> 00:36:58.960
+Well, first, I would like to take
+from what Yeliz said that we
+
+00:36:58.960 --> 00:37:03.960
+have basically... it's nice to see
+that everyone is agreeing with everything
+
+00:37:03.960 --> 00:37:08.280
+that has been said... it's like we
+talked before but we didn’t...
+
+00:37:08.320 --> 00:37:09.200
+we didn't talk at all,
+
+00:37:09.200 --> 00:37:14.440
+and so it's nice to see that
+everyone is having the same problems and
+
+00:37:16.000 --> 00:37:18.560
+about what Yeliz said that she divided
+
+00:37:18.880 --> 00:37:21.720
+the work of
+automatic evaluation in three steps.
+
+00:37:21.960 --> 00:37:24.560
+The first one is testing
+and the second one is
+
+00:37:25.000 --> 00:37:28.240
+automatically repairing
+accessibility on websites.
+
+00:37:29.080 --> 00:37:31.400
+From my end, specifically,
+
+00:37:31.400 --> 00:37:34.800
+I don't work with something that is,
+
+00:37:35.560 --> 00:37:37.880
+I will say,
+
+00:37:37.880 --> 00:37:40.840
+subjective, like image content generation.
+
+00:37:41.360 --> 00:37:45.760
+I... my work mostly
+focuses on identifying widgets.
+
+00:37:45.880 --> 00:37:47.920
+And this is kind of objective, right?
+
+00:37:48.120 --> 00:37:50.840
+It's a dropdown.
+It's not a tooltip.
+ +00:37:51.280 --> 00:37:53.840 +This is something that I don't need +to worry + +00:37:53.840 --> 00:37:57.280 +about being sued over a bad +classification or something else. + +00:37:58.000 --> 00:38:00.960 +So... this is a different + +00:38:01.200 --> 00:38:05.320 +aspect of accessibility that I work on +and specifically my end + +00:38:05.320 --> 00:38:09.000 +I work with supervised +learning as everyone and... + +00:38:09.280 --> 00:38:12.120 +classifying DOM elements as specific + +00:38:12.920 --> 00:38:15.480 +components, interface components. + +00:38:15.480 --> 00:38:20.320 +I, I use features extracted +from the DOM structure. So + +00:38:22.400 --> 00:38:23.360 +I think everyone + +00:38:23.360 --> 00:38:25.720 +mentioned this, Sheng mentioned it as well. + +00:38:26.440 --> 00:38:30.840 +Yeliz mentioned it in the question +about tables and everything else and + +00:38:32.080 --> 00:38:34.120 +I'm trying to use data + +00:38:36.080 --> 00:38:40.040 +from websites +that I evaluate as accessible + +00:38:41.200 --> 00:38:44.960 +to enhance the accessibility of websites + +00:38:44.960 --> 00:38:48.360 +that I don't... that don't +implement these requirements. + +00:38:48.360 --> 00:38:49.240 +For instance, + +00:38:49.240 --> 00:38:53.680 +I see a website that implements rules, +that implements the ARIA specification. + +00:38:53.680 --> 00:38:54.800 +So I use it. + +00:38:54.800 --> 00:39:00.520 +I extract data from it to to +maybe apply it in a website + +00:39:00.520 --> 00:39:04.080 +that doesn’t. This is kind of the, +the work that I'm working, + +00:39:05.120 --> 00:39:07.920 +this is kind of what I'm doing right now. + +00:39:07.920 --> 00:39:12.360 +And... there is another thing. + +00:39:14.680 --> 00:39:15.440 +So... + +00:39:16.280 --> 00:39:18.840 +Fabio also mentioned the question +about confidence. + +00:39:19.240 --> 00:39:23.120 +I think this is kind of critical for us +in terms of machine learning. + +00:39:23.120 --> 00:39:26.280 +I think the word that we use +usually is accuracy + +00:39:27.160 --> 00:39:29.920 +and I believe that what will guide + +00:39:30.680 --> 00:39:35.480 +each of us as researchers, +whether we work on tests + +00:39:35.480 --> 00:39:40.600 +or automatic repair, is basically +the accuracy of our methodologies. + +00:39:40.600 --> 00:39:41.400 +If I have + +00:39:42.520 --> 00:39:43.400 +a lower + +00:39:43.400 --> 00:39:47.680 +accuracy problem, +I will use a testing approach. + +00:39:47.960 --> 00:39:51.080 +Otherwise, I will try to +automatically repair the webpage. + +00:39:51.360 --> 00:39:56.560 +Of course, the best result we can get +is automatic repair. + +00:39:56.560 --> 00:39:59.760 +This is what will scale +better for our users. + +00:39:59.760 --> 00:40:03.400 +This is what will benefit more users + +00:40:03.400 --> 00:40:07.960 +in terms of scale. + +00:40:07.960 --> 00:40:11.800 +I think that it, Carlos. Everyone talked +about everything that I wanted to say, + +00:40:11.800 --> 00:40:14.160 +so this is mostly +what I would say different. + +00:40:14.160 --> 00:40:16.360 +So this is nice. Okay. + +00:40:16.960 --> 00:40:20.160 +Still, let me just + +00:40:21.520 --> 00:40:24.000 +a small provocation. + +00:40:24.000 --> 00:40:26.160 +You said that you were + +00:40:26.920 --> 00:40:30.400 +everything that you work in +widget identification is objective. 
+ +00:40:30.400 --> 00:40:34.720 +I will disagree a little bit +and I'm sure we can find several + +00:40:34.720 --> 00:40:38.120 +examples of pages where you don't know +if that's a link or a button. + +00:40:38.800 --> 00:40:43.120 +It's so there can be subjectivity in there +also. + +00:40:44.080 --> 00:40:47.800 +So yeah, but just that, +just a small provocation, as I say. + +00:40:48.280 --> 00:40:50.640 +So we are fast approaching. + +00:40:51.040 --> 00:40:51.520 +Yeah. + +00:40:51.520 --> 00:40:52.520 +When + +00:40:52.520 --> 00:40:56.720 +the conversation is good, time flies by +so we are fast approaching the end. + +00:40:56.720 --> 00:40:59.680 +So I will ask you to just quickly + +00:40:59.920 --> 00:41:04.200 +comment on a final aspect, +just one minute or two. + +00:41:04.200 --> 00:41:08.440 +So please try to, to stick to that +so that we don't go over time + +00:41:09.040 --> 00:41:13.520 +and just you've already been in some ways + +00:41:13.520 --> 00:41:17.240 +approaching this, +but just what do you expect? + +00:41:17.560 --> 00:41:19.640 +What would be +one of the major contributions? + +00:41:19.640 --> 00:41:23.720 +What are your future perspectives +about the use of machine + +00:41:23.720 --> 00:41:26.720 +learning techniques +for web accessibility evaluation? + +00:41:27.440 --> 00:41:28.960 +And I will start with you now, Fabio. + +00:41:32.760 --> 00:41:35.640 +Okay, I mean, if I think + +00:41:35.640 --> 00:41:40.160 +about a couple of interesting, +you know, possibilities, + +00:41:40.160 --> 00:41:43.760 +open up by +machine learning, I mean, + +00:41:44.280 --> 00:41:46.960 +you know.... when we.... +when we have a user interface... + +00:41:47.520 --> 00:41:50.080 +generally speaking we +have two possible approaches. + +00:41:50.080 --> 00:41:55.480 +So one is to look at the code, +the associated generic interface + +00:41:55.480 --> 00:41:59.520 +and see whether it is compliant +with some rules. In other approaches + +00:41:59.520 --> 00:42:02.600 +to look at how people interact +with the system. + +00:42:02.600 --> 00:42:06.120 +So to look at the logs of +user interaction. + +00:42:06.640 --> 00:42:12.080 +And so, in the past we did some work +where we created a tool to identify + +00:42:12.120 --> 00:42:14.520 +bad usability smells, +which means + +00:42:16.680 --> 00:42:19.880 +patterns of interaction that highlight +there is some usability problems. + +00:42:19.960 --> 00:42:24.720 +So for example, we look at mobile devices +when there are a lot of pinch out, pinch in, + +00:42:25.040 --> 00:42:28.360 +that means that probably the +information is not well presented or + +00:42:28.600 --> 00:42:32.320 +when people access continuously different +links it means the links are too close, I mean... + +00:42:32.840 --> 00:42:37.120 +so it's possible to identify +sequences of interaction that highlight + +00:42:37.120 --> 00:42:40.000 +there is a usability problem. +So, one possibility, you know... + +00:42:40.280 --> 00:42:43.320 +is to use some kind of machine +learning for classifying + +00:42:44.200 --> 00:42:48.400 +interaction with some +assistive technology + +00:42:48.400 --> 00:42:52.360 +that highlighted this kind of problems... +that allow us from the data + +00:42:52.360 --> 00:42:55.360 +to use experience that +there are some specific + +00:42:55.760 --> 00:42:57.920 +accessibility problems. + +00:42:58.600 --> 00:43:01.560 +And... the second one... is about... 
+ +00:43:01.680 --> 00:43:06.000 +we mentioned before the importance +of providing explanation + +00:43:06.000 --> 00:43:10.240 +about a problem or why +it is a problem and how to solve it. + +00:43:10.880 --> 00:43:13.960 +So I think that would be +the idea + +00:43:14.600 --> 00:43:18.440 +in theory.... an idea application +for a conversational agent. + +00:43:18.520 --> 00:43:22.880 +Now there is a lot if discussion, +for example, around ChatGPT + +00:43:24.200 --> 00:43:25.240 +but + +00:43:25.240 --> 00:43:28.480 +it’s very difficult, you know, +to actually design + +00:43:28.480 --> 00:43:33.480 +this case... a conversational agent that +is able to take into account + +00:43:33.480 --> 00:43:38.080 +the relevant context, which in +this case is the type of user + +00:43:38.080 --> 00:43:42.480 +that is actually now asking for help, +because there are really many types of users + +00:43:42.480 --> 00:43:46.480 +when people look at accessibility results, +you know, that can be the web + +00:43:46.480 --> 00:43:50.600 +commission with the person who has decided +to have a service but don’t know anything + +00:43:50.600 --> 00:43:52.640 +about its implementation, +and it can be + +00:43:53.040 --> 00:43:56.760 +the user, the disabled user, +the developer, the accessibility expert. + +00:43:56.760 --> 00:44:02.680 +Each of them require a different +language, different terms, different + +00:44:02.680 --> 00:44:06.720 +type of explanation, +because when they look at... is this + +00:44:06.840 --> 00:44:09.640 +website accessible, +they really have different criteria + +00:44:10.920 --> 00:44:13.480 +to understand +the level of accessibility + +00:44:13.480 --> 00:44:17.440 +and how to, then, operate +in order to improve it. + +00:44:18.200 --> 00:44:21.160 +So, this is one dimension +of the complexity. + +00:44:22.000 --> 00:44:25.360 +The other dimension of the complexity +is the actual implementation. + +00:44:25.560 --> 00:44:30.440 +It's really... we have... in this experience we +are conducting in our laboratory + +00:44:30.520 --> 00:44:35.160 +with these large scale validation.... +ten thousand websites... it was really amazing + +00:44:35.160 --> 00:44:41.040 +to see how different, you know, implementation +languages... technical context... + +00:44:41.080 --> 00:44:42.440 +people have used in order to + +00:44:43.600 --> 00:44:45.560 +implement the website. + +00:44:45.560 --> 00:44:47.920 +I mean, even people who +have used the same + +00:44:47.920 --> 00:44:50.440 +JavaScript frameworks, they can use them +in very different ways + +00:44:50.920 --> 00:44:52.240 +and so on. + +00:44:52.240 --> 00:44:55.960 +So when you want to +provide an explanation + +00:44:57.480 --> 00:45:00.120 +often it’s disappointing just providing an understanding + +00:45:00.400 --> 00:45:03.480 +a description of the errors... +some standard examples + +00:45:03.480 --> 00:45:07.520 +of how to solve the problem because often + +00:45:07.800 --> 00:45:11.160 +there are different situations +that require some specific + +00:45:11.160 --> 00:45:14.920 +additional consideration for +better explaining + +00:45:15.200 --> 00:45:19.480 +how that error occurred, +and what can be done in order to solve it. + +00:45:20.240 --> 00:45:26.200 +But this part... this complexity... a good +conversational agent for accessibility + +00:45:26.200 --> 00:45:29.080 +would be a great result. + +00:45:29.360 --> 00:45:30.320 +Thank you. + +00:45:30.680 --> 00:45:33.280 +Sheng, you want to go next? 
+
+00:45:33.280 --> 00:45:35.880
+Okay, so time is limited.
+
+00:45:35.880 --> 00:45:37.440
+I will save time.
+
+00:45:37.440 --> 00:45:39.480
+I will talk about the future
+
+00:45:39.760 --> 00:45:43.240
+perspective on
+efficient page sampling.
+
+00:45:43.720 --> 00:45:48.360
+According to our data analysis,
+we find that the... the web pages
+
+00:45:48.400 --> 00:45:52.080
+with a similar connection
+structure to other pages
+
+00:45:52.080 --> 00:45:56.200
+usually have
+similar accessibility problems.
+
+00:45:56.440 --> 00:45:59.000
+So we tried to take this into...
+
+00:45:59.320 --> 00:46:04.000
+take this into account
+for the accessibility evaluation.
+
+00:46:04.360 --> 00:46:07.480
+And recently we used graph
+neural networks,
+
+00:46:07.720 --> 00:46:12.040
+which have been a hot research
+topic in the machine learning community.
+
+00:46:12.520 --> 00:46:16.360
+It combines both the network topology
+and the node attributes
+
+00:46:17.080 --> 00:46:19.480
+into a unified representation
+for each node.
+
+00:46:19.840 --> 00:46:27.480
+And here each node
+
+00:46:27.480 --> 00:46:30.640
+Okay, I guess we lost Sheng again.
+
+00:46:30.640 --> 00:46:35.320
+So in the interest of time
+I will skip immediately to you,
+
+00:46:35.320 --> 00:46:39.560
+Willian.
+
+00:46:39.560 --> 00:46:40.240
+Okay. See,
+
+00:46:42.040 --> 00:46:42.680
+my take on this
+
+00:46:42.680 --> 00:46:44.840
+I think it will be... pretty direct.
+
+00:46:44.840 --> 00:46:49.360
+I, I think Fabio will talk about it,
+but we are all working
+
+00:46:49.360 --> 00:46:52.640
+with specific guidelines
+inside of a set of guidelines,
+
+00:46:52.680 --> 00:46:55.040
+of accessibility guidelines,
+of WCAG.
+
+00:46:55.040 --> 00:46:58.200
+And I think the
+
+00:46:59.040 --> 00:47:03.760
+next step that we should address
+is associated with generalization
+
+00:47:04.280 --> 00:47:09.160
+and incorporating it into production-ready
+projects,
+
+00:47:09.160 --> 00:47:12.960
+so that it's just incorporated in
+any automatic evaluation tool.
+
+00:47:13.840 --> 00:47:18.640
+And so in regards to all the problems
+
+00:47:18.640 --> 00:47:22.000
+that we mentioned, associated with data
+acquisition, manual classification,
+
+00:47:22.560 --> 00:47:26.880
+we have to find a way
+to scale our experiments
+
+00:47:26.880 --> 00:47:30.600
+so that we can guarantee
+that it will work in any
+
+00:47:31.480 --> 00:47:34.360
+theme or website.
+
+00:47:34.360 --> 00:47:39.280
+In regards to my research specifically,
+I'm
+
+00:47:39.280 --> 00:47:43.080
+trying to work on automated generation
+of structure for websites.
+
+00:47:43.240 --> 00:47:47.760
+For instance, generating
+header structures and other
+
+00:47:48.480 --> 00:47:51.360
+specific structures that the user can use
+
+00:47:51.680 --> 00:47:54.720
+to practically... automatically enhance
+
+00:47:55.360 --> 00:47:57.920
+the web accessibility of web pages.
+
+00:47:57.920 --> 00:48:01.280
+And I think... I think that's it.
+
+00:48:01.440 --> 00:48:05.480
+In regards to what you said, Carlos,
+just so that I can make myself clear,
+
+00:48:05.920 --> 00:48:09.920
+I... what I wanted to say
+is that... different from the panelists
+
+00:48:09.920 --> 00:48:11.920
+from yesterday and different from Chaoai,
+
+00:48:11.920 --> 00:48:15.000
+for instance, I think I'm working with
+
+00:48:16.280 --> 00:48:18.280
+a simpler
+
+00:48:19.000 --> 00:48:20.080
+machine learning approach.
+
+00:48:20.080 --> 00:48:24.920
+I don't use deep learning, for instance,
+since I don't see the
+
+00:48:25.920 --> 00:48:28.600
+use for it yet in my research,
+
+00:48:28.920 --> 00:48:29.680
+because my research...
+
+00:48:29.680 --> 00:48:33.640
+I think Yeliz mentioned that she
+might use it for labeling
+
+00:48:33.640 --> 00:48:38.120
+and other stuff... like generation,
+and I haven't reached that point yet.
+
+00:48:38.120 --> 00:48:43.120
+I think there are a lot of things
+that we can do just with classification,
+
+00:48:43.120 --> 00:48:44.160
+for instance.
+
+00:48:44.800 --> 00:48:47.080
+That's it.
+Okay. Thank you.
+
+00:48:47.080 --> 00:48:49.440
+And Yeliz, you want to conclude?
+
+00:48:50.680 --> 00:48:53.080
+Yes, I actually
+
+00:48:53.080 --> 00:48:58.000
+at least I hope that we will see
+developments again in two things.
+
+00:48:58.000 --> 00:49:01.840
+I think the first one
+is automated testing.
+
+00:49:01.840 --> 00:49:07.760
+I think we’re now at the stage
+where we have many tools and we know how
+
+00:49:07.760 --> 00:49:12.840
+to implement and automate certain,
+for example, certain guidelines.
+
+00:49:13.120 --> 00:49:18.840
+But there are a bunch of others
+that are very subjective.
+
+00:49:19.160 --> 00:49:21.520
+They require human evaluation.
+
+00:49:21.760 --> 00:49:23.920
+It's very costly and expensive,
+
+00:49:23.920 --> 00:49:26.400
+I think, from an evaluation perspective.
+
+00:49:26.760 --> 00:49:31.080
+So I'm hoping that there will be
+developments in machine learning
+
+00:49:31.080 --> 00:49:36.880
+and AI algorithms to support
+and have more automation in those ones
+
+00:49:37.120 --> 00:49:40.840
+that really now require a human
+
+00:49:42.040 --> 00:49:43.960
+to do the evaluations.
+
+00:49:43.960 --> 00:49:46.720
+And the other one is about the repairing.
+
+00:49:46.960 --> 00:49:49.960
+So I'm also hoping
+that we will also see developments
+
+00:49:49.960 --> 00:49:56.160
+in automating the kind
+of fixing the problems automatically,
+
+00:49:56.720 --> 00:50:01.480
+learning from the good examples
+and being able to develop solutions
+
+00:50:02.000 --> 00:50:06.640
+while the pages are developed,
+they are actually automatically fixed
+
+00:50:06.640 --> 00:50:09.680
+and sometimes may be seamless
+to the developers
+
+00:50:09.960 --> 00:50:15.280
+so that they are not worried about the,
+you know, certain issues.
+
+00:50:15.280 --> 00:50:20.840
+Of course, explainability
+is very important, to explain to developers
+
+00:50:20.840 --> 00:50:24.280
+what's going on,
+but I think automating certain things
+
+00:50:24.280 --> 00:50:27.480
+there would really help
+automating the repair.
+
+00:50:28.320 --> 00:50:31.440
+Of course, to do that,
+I think we need datasets,
+
+00:50:31.440 --> 00:50:34.640
+and maybe hopefully in the community
+we will have shared datasets
+
+00:50:34.640 --> 00:50:38.800
+that we can all work with
+and explore different algorithms.
+
+00:50:39.040 --> 00:50:40.480
+As we know, it's costly.
+
+00:50:40.480 --> 00:50:43.600
+So exploring and doing research
+
+00:50:43.600 --> 00:50:47.200
+with existing data helps a lot.
+
+00:50:47.480 --> 00:50:52.600
+So I'm hoping that in the community
+we will see public datasets and of course
+
+00:50:53.560 --> 00:50:56.440
+the technical skills are very important.
+
+00:50:56.440 --> 00:51:01.440
+So human-centered AI,
+which is needed here, I think, is important.
+ +00:51:01.440 --> 00:51:03.640 +So hopefully we will also see more people + +00:51:04.160 --> 00:51:07.520 +contributing to that +and the the development. + +00:51:07.840 --> 00:51:10.960 +And of course, we should always remember, +as Jutta + +00:51:10.960 --> 00:51:14.040 +was mentioning yesterday, +the bias is critical. + +00:51:14.280 --> 00:51:18.280 +So when we are talking about, for example, +automatically testing certain, + +00:51:18.280 --> 00:51:22.760 +automating the test of certain rules, +we should make sure that we are + +00:51:22.760 --> 00:51:27.360 +not biasing certain user groups +and we are really targeting everybody + +00:51:27.360 --> 00:51:31.240 +and different user +groups, different needs and users. + +00:51:31.440 --> 00:51:34.120 +So that's all I wanted to say. + +00:51:34.120 --> 00:51:38.160 +Thank you so much, Yeliz. +And for bringing also that note to too. + +00:51:38.480 --> 00:51:41.240 +I think it was a great way to finish this. + +00:51:41.240 --> 00:51:42.680 +This panel. + +00:51:42.680 --> 00:51:46.040 +So thank you so much +to the four of you. + +00:51:46.240 --> 00:51:49.520 +Really interesting to see +all of those perspectives and what you + +00:51:50.440 --> 00:51:53.120 +what you're working on +and what you're planning + +00:51:53.440 --> 00:51:56.440 +on doing so in the next + +00:51:58.000 --> 00:51:58.560 +years. + +00:51:58.560 --> 00:51:59.640 +I guess + +00:52:00.880 --> 00:52:02.320 +let me just draw your attention. + +00:52:02.320 --> 00:52:05.680 +There are several +interesting questions on the Q&A. + +00:52:05.680 --> 00:52:10.360 +So if you do have a chance, +try to answer them there. + +00:52:10.360 --> 00:52:15.200 +We unfortunately didn't have time to +to get to those during our panel. + +00:52:15.760 --> 00:52:19.520 +But I think there are and there are some +that really have your names on it. + +00:52:20.040 --> 00:52:23.400 +So you're exactly the + +00:52:23.840 --> 00:52:26.200 +the correct person to answer those. + +00:52:26.800 --> 00:52:31.320 +So once again, thank you so much for +for your participation was great + +00:52:31.720 --> 00:52:35.480 +and I will now have a shorter break + +00:52:35.480 --> 00:52:40.120 +than the 10 minutes and has +and will be back in 5 minutes. + +00:52:40.120 --> 00:52:44.040 +So 5 minutes past the hour. + diff --git a/pages/about/projects/wai-coop/symposium2-captions/media_accessibility.vtt b/pages/about/projects/wai-coop/symposium2-captions/media_accessibility.vtt new file mode 100644 index 00000000000..4f34239104f --- /dev/null +++ b/pages/about/projects/wai-coop/symposium2-captions/media_accessibility.vtt @@ -0,0 +1,2571 @@ +WEBVTT +Kind: captions +Language: en + +00:00:00.000 --> 00:00:02.960 +Welcome you all to the second panel + +00:00:04.680 --> 00:00:06.720 +of the first day and + +00:00:09.280 --> 00:00:10.480 +this panel + +00:00:11.000 --> 00:00:14.400 +will aim to discuss + +00:00:14.960 --> 00:00:19.440 +the current status of natural language +processing techniques. 
+
+00:00:20.000 --> 00:00:25.200
+And in the context of the
+web, we can think
+
+00:00:25.480 --> 00:00:27.680
+or we know that they can be used
+
+00:00:28.200 --> 00:00:31.440
+to generate
+textual descriptions for images,
+
+00:00:31.440 --> 00:00:36.440
+but also for other visual media
+that's presented on web pages,
+
+00:00:37.160 --> 00:00:40.880
+and today we will focus our discussion on,
+
+00:00:41.840 --> 00:00:45.480
+or start to consider, aspects
+such as text adaptation
+
+00:00:45.480 --> 00:00:51.480
+and how to provide understandable text
+to better meet web users' needs.
+
+00:00:51.480 --> 00:00:55.880
+And there, in different contexts
+of use, and also what
+
+00:00:56.160 --> 00:01:00.000
+our future perspectives
+for natural language
+
+00:01:00.000 --> 00:01:03.240
+processing on web accessibility
+
+00:01:03.240 --> 00:01:05.400
+are to support web accessibility.
+
+00:01:06.720 --> 00:01:09.600
+So I'm glad to welcome back
+
+00:01:10.720 --> 00:01:13.000
+Michael, Shivam and Amy.
+
+00:01:13.200 --> 00:01:16.320
+And when
+
+00:01:16.320 --> 00:01:18.840
+you are Amy and also
+
+00:01:19.560 --> 00:01:23.600
+to welcome Shaomei Wu from AImpower
+
+00:01:26.160 --> 00:01:27.480
+who agreed to join
+
+00:01:27.480 --> 00:01:30.920
+us on the second panel of the day.
+
+00:01:31.360 --> 00:01:33.840
+Welcome
+you all back and welcome, Shaomei.
+
+00:01:34.760 --> 00:01:36.960
+For your first intervention
+
+00:01:36.960 --> 00:01:41.400
+I ask you
+just to briefly introduce yourself,
+
+00:01:42.000 --> 00:01:44.240
+as your three other co-panelists
+
+00:01:45.440 --> 00:01:47.600
+already did that on the previous panel.
+
+00:01:47.600 --> 00:01:51.400
+So no need to
+reintroduce yourselves.
+
+00:01:52.800 --> 00:01:57.520
+But I will start by
+
+00:01:59.840 --> 00:02:02.760
+thinking about once again the quality.
+
+00:02:02.760 --> 00:02:05.560
+So we go back to the quality topic,
+
+00:02:06.240 --> 00:02:09.040
+but now the quality of
+machine generated descriptions,
+
+00:02:11.040 --> 00:02:13.920
+now no longer
+from the perspective of image processing,
+
+00:02:14.200 --> 00:02:18.240
+but from the perspective
+of natural language generation, right?
+
+00:02:18.240 --> 00:02:21.480
+So how can we improve the quality
+
+00:02:21.480 --> 00:02:26.760
+of these machine generated descriptions,
+especially taking into account
+
+00:02:26.880 --> 00:02:30.320
+the personalized preferences from users?
+
+00:02:30.760 --> 00:02:33.360
+And I will start with you, Shaomei.
+
+00:02:36.720 --> 00:02:39.360
+Hello. So
+
+00:02:41.880 --> 00:02:44.160
+thank you all for
+
+00:02:45.160 --> 00:02:47.640
+having me here
+
+00:02:47.640 --> 00:02:48.200
+today.
+
+00:02:48.200 --> 00:02:53.160
+And my name is Shaomei Wu and right now
+
+00:02:53.160 --> 00:02:57.480
+I'm the founder and CEO of AImpower.org,
+
+00:02:58.080 --> 00:03:02.160
+a tech non-profit that researches
+
+00:03:02.160 --> 00:03:05.320
+and co-creates
+
+00:03:06.080 --> 00:03:09.800
+empowering technology for
+
+00:03:09.840 --> 00:03:13.640
+otherwise marginalized users.
+
+00:03:13.640 --> 00:03:15.800
+And first of all, I want to
+
+00:03:16.960 --> 00:03:20.400
+and also share,
+
+00:03:20.440 --> 00:03:26.200
+that I do have a stutter so that you may hear
+
+00:03:26.440 --> 00:03:30.720
+there'll be more pauses and ...
+when I talk
+
+00:03:32.080 --> 00:03:33.920
+and before AImpower.
+ +00:03:33.920 --> 00:03:37.840 +I was a research scientist at Facebook + +00:03:38.920 --> 00:03:42.080 +leading a lot of research and + +00:03:43.680 --> 00:03:46.120 +product work on + +00:03:46.760 --> 00:03:50.480 +accessibility, inclusion and equity. + +00:03:51.160 --> 00:03:56.400 +So one of those product that I shipped is + +00:03:56.400 --> 00:03:59.240 +automatic alt texts + +00:04:02.840 --> 00:04:07.520 +and these you're allowed to +we will provide + +00:04:08.080 --> 00:04:10.320 +short and + +00:04:11.440 --> 00:04:13.560 +machine generated + +00:04:13.560 --> 00:04:16.440 +description of + +00:04:17.960 --> 00:04:21.800 +images on Facebook and Instagram + +00:04:23.160 --> 00:04:28.880 +to screen reader users in real time. + +00:04:29.880 --> 00:04:31.880 +So when it comes to + +00:04:31.880 --> 00:04:35.280 +quality of automated alt text + +00:04:35.840 --> 00:04:39.280 +or other similar + +00:04:40.480 --> 00:04:41.880 +systems, + +00:04:42.240 --> 00:04:44.400 +we saw two kind of biggest + +00:04:45.480 --> 00:04:50.280 +area of development that we wanted to do. + +00:04:50.760 --> 00:04:54.240 +And the first one is accuracy, + +00:04:54.320 --> 00:04:57.480 +which I think we talked a lot about + +00:04:57.480 --> 00:05:00.960 +in the last panel as well. + +00:05:02.520 --> 00:05:05.120 +But I want to talk + +00:05:05.120 --> 00:05:09.840 +it'll be more about the second one, +which is the + +00:05:10.880 --> 00:05:12.960 +richness of those + +00:05:14.000 --> 00:05:16.000 +descriptions. + +00:05:16.760 --> 00:05:20.520 +So to be honest, + +00:05:20.760 --> 00:05:26.480 +like the alt texts that we generated + +00:05:26.520 --> 00:05:29.560 +was quite + +00:05:29.880 --> 00:05:32.320 +limited, you know, a lot of + +00:05:33.440 --> 00:05:37.880 +users they will say +that it's more like + +00:05:38.560 --> 00:05:40.680 +teasers, kind of, you know, + +00:05:41.960 --> 00:05:44.880 +and telling you like, oh yeah, five + +00:05:45.720 --> 00:05:49.200 +people smiling, pizza, indoor. + +00:05:49.960 --> 00:05:51.600 +But no more than that. + +00:05:51.600 --> 00:05:59.880 +No more than what kind of pizza +and what kind of like + +00:06:00.520 --> 00:06:03.000 +indoor environment. Is it at home? + +00:06:03.000 --> 00:06:06.200 +Is it like a restaurant? + +00:06:06.200 --> 00:06:12.720 +So I think our users really, +when they kind of gathered all of the + +00:06:13.000 --> 00:06:17.520 +richness of what someone who has eyesight can see, + +00:06:17.880 --> 00:06:20.320 +you know, can handle access. + +00:06:21.120 --> 00:06:24.320 +So one particular kind + +00:06:26.240 --> 00:06:31.200 +an area +that users do really want to know more + +00:06:31.360 --> 00:06:37.000 +is it's about people who they are, +how do they + +00:06:38.480 --> 00:06:39.120 +look? + +00:06:39.920 --> 00:06:44.280 +Look like... race, gender and + +00:06:45.320 --> 00:06:47.640 +even how + +00:06:48.320 --> 00:06:49.840 +attractive they are + +00:06:49.840 --> 00:06:53.120 +because that is something +really kind of socially salient. + +00:06:53.880 --> 00:06:58.960 +So that was kind of big challenge for us. + +00:06:59.040 --> 00:07:01.920 +One way was designing our + +00:07:03.560 --> 00:07:07.040 +our system + +00:07:08.280 --> 00:07:11.520 +because, you know, like, how can we share + +00:07:11.520 --> 00:07:14.560 +those kind of attribute in a, like, + +00:07:15.520 --> 00:07:19.720 +you know, both accurate and kind of socially + +00:07:20.880 --> 00:07:22.400 +conscious way. 
+ +00:07:22.400 --> 00:07:26.840 +So we actually opt to not + +00:07:28.080 --> 00:07:29.120 +kind of + +00:07:30.120 --> 00:07:34.200 +showing like the race and gender of the, + +00:07:35.160 --> 00:07:39.720 +the people being photographed, + +00:07:40.640 --> 00:07:43.640 +but which I you know, + +00:07:43.920 --> 00:07:46.600 +we got actually a lot of complaint on + +00:07:47.160 --> 00:07:52.200 +but how to kind of +yeah convey those in a kind of socially + +00:07:53.400 --> 00:07:57.040 +respectful way I think it's something we + +00:07:57.840 --> 00:08:00.520 +we should really work like work on + +00:08:00.960 --> 00:08:07.840 +and now I can see kind of like a few ways +that we can make that better. + +00:08:07.880 --> 00:08:12.080 +For example, like, +considering the like... + +00:08:16.800 --> 00:08:18.480 +relationship + +00:08:18.480 --> 00:08:21.760 +between kind of people in the photo and + +00:08:23.040 --> 00:08:27.120 +yours, for example, +like if they are friends + +00:08:27.120 --> 00:08:32.600 +and then we can just tell them the name, +and, you know, other things about those people + +00:08:32.920 --> 00:08:36.320 +and then another thing +is to kind of, give people + +00:08:38.560 --> 00:08:41.840 +progress details. + +00:08:42.040 --> 00:08:48.200 +So then you know, I have some kind of like option +to kind of allow the consumer + +00:08:48.200 --> 00:08:52.200 +of those alt texts +to kind of request more + +00:08:54.840 --> 00:08:57.960 +details that we just cannot provide + +00:08:57.960 --> 00:09:02.840 +by our systems. + +00:09:03.680 --> 00:09:07.800 +So I will be done here and let + +00:09:09.280 --> 00:09:12.560 +others talk. + +00:09:12.560 --> 00:09:12.960 +Thank you Shaomei. + +00:09:12.960 --> 00:09:16.680 +Shivam, your thoughts on + +00:09:17.080 --> 00:09:21.680 +how can we improve the quality of machine +generated descriptions? + +00:09:23.080 --> 00:09:26.280 +Okay, so this is a two part thing. + +00:09:26.280 --> 00:09:29.080 +So when you come +to technically implementing more, say + +00:09:30.000 --> 00:09:32.400 +how you have design your model, +how you train them, + +00:09:32.720 --> 00:09:35.800 +and what the whoever the stakeholders +of designing a particular model + +00:09:35.800 --> 00:09:39.760 +is very much necessary +in how and when to get the quality machine + +00:09:39.760 --> 00:09:41.200 +generated description. + +00:09:41.200 --> 00:09:44.400 +Now when you when we take into account +users personalized + +00:09:44.400 --> 00:09:47.520 +preferences, this is a two part... + +00:09:47.560 --> 00:09:50.320 +So first, let's take an example. + +00:09:50.320 --> 00:09:54.360 +I am a person who knows Spanish +and my model, + +00:09:54.800 --> 00:09:58.360 +a very famous model gives descriptions +in English right, now + +00:09:58.720 --> 00:10:01.560 +that model or whatever +the consumption of that model + +00:10:01.680 --> 00:10:04.600 +is like let's say +you're using an API to consume the model. + +00:10:05.040 --> 00:10:08.680 +So that should take into account +the user's personalized preferences + +00:10:08.680 --> 00:10:12.400 +of his language +and write the output based on that as well. + +00:10:12.760 --> 00:10:17.080 +So this diversity of a model +to prepare output in multiple + +00:10:17.080 --> 00:10:21.280 +formats, multiple languages +is something that can be looked into. + +00:10:21.280 --> 00:10:25.160 +This is how the quality of the machine +generated description increases. 
+ +00:10:25.440 --> 00:10:28.600 +Now, you did not train the complete model +separately. + +00:10:28.600 --> 00:10:29.640 +What you can do + +00:10:29.640 --> 00:10:33.440 +is just a post-processing scripts +for your models and that can help. + +00:10:33.440 --> 00:10:38.680 +And you it's not much of an effort +when we say as a model training input, + +00:10:38.680 --> 00:10:42.600 +but it's very simple solution +to what can be a best solution. + +00:10:42.920 --> 00:10:45.520 +The other thing is +how you prepare your quality data. + +00:10:45.520 --> 00:10:50.760 +Now you should fully carefully categorize +it is strictly if needed. + +00:10:51.080 --> 00:10:55.640 +And let's say you have input data that are +blurred images and all sorts of thing. + +00:10:55.920 --> 00:10:59.560 +So you need to carefully +prepare your model, creating data and + +00:10:59.800 --> 00:11:02.360 +based on that data, +your description would be + +00:11:02.640 --> 00:11:07.400 +a bit more clearer and the population +will also be factored in. + +00:11:07.400 --> 00:11:08.280 +When you + +00:11:08.400 --> 00:11:13.000 +look into how you can both process +your data for a certain group of people. + +00:11:13.760 --> 00:11:17.160 +So that's how I see it. + +00:11:17.160 --> 00:11:17.760 +Thank you. + +00:11:17.760 --> 00:11:21.880 +And Amy, want to share your experiences? + +00:11:22.920 --> 00:11:23.400 +Sure. + +00:11:23.400 --> 00:11:25.920 +So a couple of ways that I've seen + +00:11:26.720 --> 00:11:31.440 +that I think are sort of promising +maybe to use NLP to improve quality. + +00:11:31.800 --> 00:11:34.600 +One thing I started +seeing recently is people, + +00:11:35.200 --> 00:11:39.080 +you know, +starting to consider context around the + +00:11:39.480 --> 00:11:42.280 +the image that's going to be described, +to maybe + +00:11:42.280 --> 00:11:45.880 +create a description +that's more that's more helpful. + +00:11:45.880 --> 00:11:47.760 +So imagine, you know, + +00:11:47.760 --> 00:11:51.720 +someone writes a post on Twitter +and they couple that post with an image. + +00:11:51.880 --> 00:11:57.560 +So considering the the post +and the image together, maybe might inform + +00:11:57.720 --> 00:12:01.080 +models on how to create something +that's more informative. + +00:12:01.080 --> 00:12:05.600 +So for instance, +if I posted a picture of myself + +00:12:06.440 --> 00:12:08.600 +snowboarding +and I said I learned a new trick, + +00:12:08.600 --> 00:12:11.760 +then it might be important to tell me +what trick you learned. + +00:12:12.320 --> 00:12:15.560 +Whereas on the other hand, +I said, I just went on vacation. + +00:12:15.560 --> 00:12:17.800 +You know, the exact trick +might not matter as much. + +00:12:18.360 --> 00:12:22.560 +And so I think that the idea +of like using language understanding + +00:12:22.560 --> 00:12:25.920 +to get more information about the context +before making a prediction is promising + +00:12:26.560 --> 00:12:27.480 +in another way. + +00:12:27.480 --> 00:12:30.600 +I've sort of seen it used to +maybe improve the quality kind of + +00:12:30.640 --> 00:12:31.680 +goes back to the other + +00:12:32.840 --> 00:12:36.080 +the other answers that were given +so maybe you can use question + +00:12:36.080 --> 00:12:39.080 +answering about the image +to gain more information when you need it. 
+ +00:12:40.440 --> 00:12:42.480 +Oh, one thing + +00:12:42.480 --> 00:12:48.040 +I've also thought about is seeing +if maybe users could give examples + +00:12:48.040 --> 00:12:52.640 +or their preferences +about descriptions in in natural language. + +00:12:52.640 --> 00:12:55.800 +So this is an example of a description +maybe we can copy the style + +00:12:55.800 --> 00:12:59.160 +of this description when we're applying it +to other descriptions. + +00:12:59.400 --> 00:13:03.960 +So maybe I like to hear about the costumes +someone wears in a + +00:13:04.920 --> 00:13:08.320 +in in a video, +and I wish that future descriptions + +00:13:08.800 --> 00:13:12.560 +might include more information about that +rather than summarizing them. + +00:13:12.880 --> 00:13:16.400 +And then finally, one other way I've seen, + +00:13:17.240 --> 00:13:20.120 +I've used NLP to improve + +00:13:20.120 --> 00:13:23.000 +quality, is also based on summarization. + +00:13:23.640 --> 00:13:27.520 +So there can be times +when there's more to describe than time + +00:13:27.520 --> 00:13:28.680 +you have to describe it. + +00:13:28.680 --> 00:13:32.720 +So especially in videos, +there's often a really small amount + +00:13:32.720 --> 00:13:36.120 +of time to describe +without overlapping the other audio. + +00:13:36.640 --> 00:13:42.080 +So one way you can use use NLP to improve +the quality + +00:13:42.360 --> 00:13:45.320 +is by trying to summarize +those descriptions so they fit + +00:13:45.320 --> 00:13:49.640 +within the time you have and they don't +decrease the experience of people + +00:13:50.560 --> 00:13:53.080 +trying to watch the video +and hear the audio at the same time. + +00:13:53.080 --> 00:13:55.560 +Yeah, yeah. + +00:13:55.560 --> 00:14:00.600 +That's that's +definitely a good use for NLP. + +00:14:01.440 --> 00:14:03.800 +Michael, still in this topic + +00:14:03.920 --> 00:14:08.560 +and I would like to have your perspective + +00:14:08.560 --> 00:14:12.560 +on initiatives from WAI that + +00:14:14.200 --> 00:14:17.000 +might assist users in + +00:14:17.400 --> 00:14:19.920 +providing their preferences + +00:14:20.760 --> 00:14:23.560 +so that eventually + +00:14:23.560 --> 00:14:26.760 +models can use those + +00:14:27.600 --> 00:14:33.440 +or anything +that might be ongoing in that regard. + +00:14:33.440 --> 00:14:34.200 +Okay. + +00:14:34.560 --> 00:14:39.600 +So first of all, just give the disclaimer +for anybody new to this session + +00:14:39.600 --> 00:14:42.320 +that I'm not a machine learning +professional, I'm speaking from the + +00:14:43.080 --> 00:14:46.080 +of my work +and the Web accessibility initiative. + +00:14:46.440 --> 00:14:47.640 +I do want to talk briefly. + +00:14:47.640 --> 00:14:52.280 +I think the other panelists +covered almost anything + +00:14:52.280 --> 00:14:54.760 +I would have said + +00:14:54.920 --> 00:15:00.720 +one thing that based on my knowledge +of how machine learning works generally + +00:15:00.720 --> 00:15:04.080 +today, our models tend to be focused + +00:15:04.080 --> 00:15:06.480 +on, you know, of our particular ability, + +00:15:07.680 --> 00:15:09.080 +and it's not universal. + +00:15:09.080 --> 00:15:13.640 +And the future models +will have more abilities combined. + +00:15:13.640 --> 00:15:18.440 +But, you know, so there may be one model +that can recognize this is a human. 
+ +00:15:18.440 --> 00:15:22.440 +And here are those attributes, another one +that you can say this is this human, + +00:15:23.280 --> 00:15:26.800 +and yet another one that can say this +human plus that human + +00:15:26.800 --> 00:15:28.320 +equals this relationship. + +00:15:29.320 --> 00:15:29.920 +So all of + +00:15:29.920 --> 00:15:32.360 +that information, +I believe is separate right now. + +00:15:33.000 --> 00:15:36.800 +So the ability for models +to share contexts, + +00:15:37.080 --> 00:15:40.960 +I think is going to be +a part of the solution that we need. + +00:15:40.960 --> 00:15:47.760 +So what I can speak up for in the Web +accessibility initiative. + +00:15:47.760 --> 00:15:52.680 +So we are only beginning to explore + +00:15:52.960 --> 00:15:57.120 +what AI and accessibility means. + +00:15:57.120 --> 00:16:00.200 +And so this symposium +is a part of that process. + +00:16:01.160 --> 00:16:04.400 +We have a practice of doing + +00:16:04.920 --> 00:16:07.520 +research papers, +sort of literature reviews, + +00:16:08.040 --> 00:16:11.000 +and then proposing accessibility +user requirements. + +00:16:11.000 --> 00:16:16.120 +So that would be something that, you know, +we could be working on to + +00:16:17.160 --> 00:16:18.680 +start gathering this information. + +00:16:18.680 --> 00:16:22.640 +And from there we decide what to do, +whether the content goes into guidelines + +00:16:22.640 --> 00:16:26.000 +or into new technologies or whatever. + +00:16:26.000 --> 00:16:30.840 +But I think most of the resources around +AI are + +00:16:31.760 --> 00:16:36.200 +would fit into new resources +for those categories. + +00:16:36.200 --> 00:16:38.520 +Okay, great. Thanks. And + +00:16:39.480 --> 00:16:42.920 +I would like now to move on to addressing +something + +00:16:42.920 --> 00:16:47.400 +that was basically the core of Jutta’s +keynote. + +00:16:47.400 --> 00:16:48.720 +So it's + +00:16:49.080 --> 00:16:52.800 +discrimination bias +or any other type of bias. + +00:16:53.080 --> 00:16:55.280 +And here + +00:16:55.720 --> 00:16:58.760 +also looking at something +that Antonio Gambabari + +00:16:59.160 --> 00:17:02.640 +has entered in the Q&A +for the previous panel. + +00:17:02.640 --> 00:17:04.680 +But I think it's also a + +00:17:05.760 --> 00:17:07.840 +very well fit. + +00:17:07.840 --> 00:17:12.280 +It fits very well into this topic +and it brought out + +00:17:13.760 --> 00:17:15.320 +the use + +00:17:15.960 --> 00:17:19.280 +large language models, LLMs + +00:17:19.280 --> 00:17:21.560 +and which are currently + +00:17:22.160 --> 00:17:25.240 +getting a lot of traction +and a lot of spotlight. + +00:17:25.760 --> 00:17:29.120 +And and do you think these LLMs + +00:17:30.240 --> 00:17:33.560 +can open up + +00:17:33.600 --> 00:17:39.000 +new avenues, as Antonio +Gambabari was mentioning, for reducing + +00:17:39.320 --> 00:17:42.480 +the different type of bias that we see + +00:17:43.160 --> 00:17:48.480 +as a result of the use of AI trained models? + +00:17:49.240 --> 00:17:53.280 +And Shivam you want to go first, this time? + +00:17:53.280 --> 00:17:54.680 +Yeah, sure, sure. + +00:17:54.680 --> 00:17:57.880 +So this is quite a question which is + +00:17:58.160 --> 00:18:01.760 +and has been close to my heart as well, +how can we address social + +00:18:01.760 --> 00:18:05.600 +bias, in largely any model. +As part of industry + +00:18:05.600 --> 00:18:08.960 +have seen a lot of ML models trainings +how the output comes. 
+ +00:18:09.360 --> 00:18:11.960 +So social ML model +results of data that they have + +00:18:12.440 --> 00:18:15.920 +and how the social attitudes +are represented within that model. + +00:18:16.520 --> 00:18:20.520 +And most of that available +data is used between models publicly sold, + +00:18:21.280 --> 00:18:23.840 +which continuously degree of bias +that you can see, + +00:18:23.840 --> 00:18:27.480 +because most of the data that are +generated on Internet is basically + +00:18:28.480 --> 00:18:30.280 +those people who can consume it, right? + +00:18:30.280 --> 00:18:35.160 +It it's not that everybody once would +who doesn't even know what it is. + +00:18:35.400 --> 00:18:36.840 +They cannot create data over there. + +00:18:36.840 --> 00:18:42.040 +So most of the data that is available to +train the model, it's built out of that. + +00:18:42.040 --> 00:18:45.920 +So that's how you see a bias in one way. +The other + +00:18:45.920 --> 00:18:49.720 +instance I can give an example is +you will see a lot of street violence, + +00:18:49.720 --> 00:18:53.760 +homelessness, drug overdose and, +all those things overrepresented + +00:18:54.080 --> 00:18:58.280 +in the text that discuss mental illness, +although these both are not similar, + +00:18:58.960 --> 00:19:01.360 +but you will find this kind +of representation + +00:19:01.600 --> 00:19:03.720 +in ML outputs like + +00:19:05.040 --> 00:19:06.080 +how can we address this? + +00:19:06.080 --> 00:19:10.040 +Now there +there is a novel way of human in the loop. + +00:19:10.040 --> 00:19:14.480 +Our human feedback loop on an existing +models where you can provide some feedback + +00:19:14.480 --> 00:19:18.840 +to the already existing model that this is +the sort of output is not correct. + +00:19:18.840 --> 00:19:22.600 +This can be a correct version and +this can be a good version some some human + +00:19:23.600 --> 00:19:24.480 +interface + +00:19:24.480 --> 00:19:24.920 +is needed or + +00:19:24.920 --> 00:19:29.280 +what that now the other aspect of it +is the representational + +00:19:29.280 --> 00:19:30.320 +training of ML models. + +00:19:30.320 --> 00:19:32.320 +Now, the underlying data is models. + +00:19:32.560 --> 00:19:34.400 +It's the main source of the issue here. + +00:19:34.400 --> 00:19:37.960 +So you need to correctly source your data +and at least up to date, + +00:19:38.800 --> 00:19:41.320 +you're not all representing +one section of data. + +00:19:41.600 --> 00:19:44.000 +For example, +let's say you have a bigger society. + +00:19:44.560 --> 00:19:48.920 +This society can be underprivileged, +overprivileged and maybe some rigid persons. + +00:19:48.920 --> 00:19:50.960 +They just account society. + +00:19:50.960 --> 00:19:53.800 +Now, you cannot just take the data +from one section of society and train + +00:19:53.840 --> 00:19:57.680 +the model and say that I can give you +a complete picture of the area. + +00:19:58.160 --> 00:20:00.920 +There's much a separate section +which are underrepresented. That's + +00:20:00.920 --> 00:20:04.600 +what is happening with all the models +right from the start of ML. + +00:20:05.040 --> 00:20:10.720 +You can see. Now what we can also do to +mitigate is you can create inclusive buckets + +00:20:10.720 --> 00:20:13.960 +where the developer of ML models +or designer of ML models + +00:20:13.960 --> 00:20:17.080 +you can give the inclusive bucket +training to them. 
+ +00:20:17.080 --> 00:20:17.440 +You can + +00:20:18.720 --> 00:20:19.160 +get them + +00:20:19.160 --> 00:20:22.800 +aware that what is happening +and what can how we can mitigate this. + +00:20:23.000 --> 00:20:26.680 +So all the person who are included +in ML generation + +00:20:26.720 --> 00:20:31.120 +or there are a lot of you still going on, +I mean a lot of data extraction goes on. + +00:20:31.320 --> 00:20:33.640 +So all those people can be trained +for inclusiveness. + +00:20:34.520 --> 00:20:36.920 +There are multiple tools +that help us do that. + +00:20:37.600 --> 00:20:39.760 +Like if you are creating a model, +you can test in + +00:20:39.960 --> 00:20:42.800 +Google helps us in + +00:20:43.080 --> 00:20:43.680 +analyzing the + +00:20:43.680 --> 00:20:46.320 +models like Google +has a lot of tools, AI fairness, + +00:20:46.440 --> 00:20:49.360 +So how your models are performing +when talk about + +00:20:49.360 --> 00:20:52.560 +a lot of including +inclusive outputs of your data. + +00:20:53.440 --> 00:20:55.960 +Also you need to do a thorough +testing of your models + +00:20:55.960 --> 00:21:02.040 +whenever you go ahead to include that +all the outputs are properly + +00:21:02.840 --> 00:21:06.280 +aligned, properly represented, +all the sections of your model + +00:21:06.560 --> 00:21:10.240 +which it is intended to be used +should be represented well. + +00:21:10.480 --> 00:21:14.520 +Your testing should be that model +in case of any models you're creating, + +00:21:15.000 --> 00:21:16.880 +because now we're not at that stage that + +00:21:17.840 --> 00:21:19.080 +AI and ML is + +00:21:19.080 --> 00:21:21.840 +in the starting off state, +it's quite mature + +00:21:21.840 --> 00:21:23.080 +right now. +We are seeing + +00:21:23.080 --> 00:21:26.280 +a lot of breakthrough technology +so we can do this going forward. + +00:21:26.280 --> 00:21:28.440 +I guess this can be a solution. + +00:21:30.000 --> 00:21:31.800 +Okay, thank you. + +00:21:31.800 --> 00:21:36.400 +Shivam. +Shaomei, can we have your input on how + +00:21:36.480 --> 00:21:41.200 +can we address that social bias +or other types of bias? + +00:21:41.200 --> 00:21:44.160 +Yeah. So + +00:21:44.520 --> 00:21:44.880 +yeah. + +00:21:44.880 --> 00:21:48.120 +So then on these, +I want to kind of go back to + +00:21:48.160 --> 00:21:52.400 +I just kind of talk about before in + +00:21:53.520 --> 00:21:55.640 +particular on + +00:21:56.400 --> 00:21:58.560 +sensitive social + +00:21:58.880 --> 00:22:03.400 +identities, you know, +about people + +00:22:03.400 --> 00:22:07.080 +on the photos. + +00:22:07.080 --> 00:22:10.200 +I, I don't see I kind of the + +00:22:10.200 --> 00:22:12.480 +way for the + +00:22:13.880 --> 00:22:16.920 +a current + +00:22:16.920 --> 00:22:19.080 +and machine learning + +00:22:21.000 --> 00:22:23.240 +system to kind of accurately + +00:22:24.320 --> 00:22:26.880 +come out with those labels. + +00:22:26.880 --> 00:22:31.800 +I think the key +kind of issue here is a lot of those + +00:22:33.760 --> 00:22:34.720 +systems will + +00:22:34.720 --> 00:22:37.560 +kind of really assume these like fixed + +00:22:37.720 --> 00:22:41.200 +and definite need + +00:22:43.800 --> 00:22:47.560 +these kind of social categorizations + +00:22:48.840 --> 00:22:53.800 +such as race and gender. 
+ +00:22:53.800 --> 00:22:56.840 +So I think maybe we should think + +00:22:56.840 --> 00:22:59.240 +be an kind of + +00:23:00.320 --> 00:23:03.720 +a machine learning systems + +00:23:03.720 --> 00:23:05.760 +and kind of find a way to + +00:23:07.320 --> 00:23:10.000 +to kind of attribute people + +00:23:10.880 --> 00:23:13.800 +respective race back fully + +00:23:15.520 --> 00:23:17.920 +through by doing this kind of like + +00:23:17.920 --> 00:23:20.600 +having to like agencies + +00:23:21.000 --> 00:23:23.840 +of those being kind of + +00:23:25.880 --> 00:23:29.280 +photographed and being + +00:23:29.520 --> 00:23:31.040 +described. + +00:23:31.040 --> 00:23:35.480 +For example, I think now a lot of people +has been kind of + +00:23:36.560 --> 00:23:38.960 +specifying their + +00:23:39.760 --> 00:23:42.720 +pronouns, for example in their + +00:23:42.720 --> 00:23:46.360 +social media bios + +00:23:46.800 --> 00:23:49.960 +and I think those kind of + +00:23:51.000 --> 00:23:52.680 +information should be + +00:23:52.680 --> 00:23:55.720 +made use of or could be kind of + +00:23:56.040 --> 00:23:59.000 +made use of one way + +00:23:59.000 --> 00:24:01.800 +of assigning on, you know, + +00:24:01.800 --> 00:24:04.760 +one way to describing + +00:24:04.760 --> 00:24:07.200 +the gender of somebody in the photo. + +00:24:07.920 --> 00:24:11.040 +And also another kind of + +00:24:13.200 --> 00:24:17.080 +interactions that we have been kind of +exploring + +00:24:17.080 --> 00:24:20.120 +is to just kind of describing + +00:24:20.120 --> 00:24:23.120 +the appearances + +00:24:23.320 --> 00:24:25.440 +instead of identities. + +00:24:26.200 --> 00:24:28.480 +For example, what kind of + +00:24:28.480 --> 00:24:31.200 +describe + +00:24:31.200 --> 00:24:35.200 +skin tones or hair style + +00:24:35.640 --> 00:24:38.560 +and outfit + +00:24:39.600 --> 00:24:41.320 +instead of + +00:24:41.320 --> 00:24:45.480 +assigning a kind of race +or gender label of somebody. + +00:24:46.320 --> 00:24:51.240 +But I don't think any of those solutions +can really address + +00:24:51.240 --> 00:24:56.280 +the kind of the real cause of the problem. + +00:24:56.360 --> 00:25:01.200 +So I don't really have a very good +answer on this + +00:25:01.800 --> 00:25:04.960 +I think maybe we should, + +00:25:04.960 --> 00:25:08.520 +you know, like maybe the alternative +is to kind of think of the way + +00:25:08.840 --> 00:25:12.880 +to come away and kind of share who we are. + +00:25:13.480 --> 00:25:16.880 +We saw so much relying on the kind of + +00:25:21.640 --> 00:25:25.120 +images like we are today. + +00:25:25.240 --> 00:25:28.360 +So, you know, like, how can we convey + +00:25:28.880 --> 00:25:30.680 +the kind of + +00:25:31.800 --> 00:25:34.640 +information that we want to share online, + +00:25:35.080 --> 00:25:37.840 +not so visual centric way. + +00:25:38.400 --> 00:25:39.440 +I think that's a kind of + +00:25:40.520 --> 00:25:41.640 +bigger + +00:25:44.520 --> 00:25:47.320 +question, the way I saw it, too. + +00:25:47.320 --> 00:25:48.400 +Thank you. + +00:25:49.560 --> 00:25:50.640 +Thank you, Shaomei. + +00:25:50.640 --> 00:25:53.720 +And Amy next to you. + +00:25:54.640 --> 00:25:58.600 +I think the prior +the prior answer is mostly covered. + +00:25:58.600 --> 00:26:00.480 +The things I was going to mention I loved. 
+ +00:26:00.480 --> 00:26:03.040 +Shaomei’s answer about, + +00:26:03.040 --> 00:26:06.240 +you know, describing ourselves in ways +that are like figuring out ways + +00:26:06.240 --> 00:26:10.320 +that don't rely on the visual information +and giving agency to people + +00:26:10.320 --> 00:26:14.840 +to just to add their own identities +that they want to be shared. + +00:26:15.000 --> 00:26:18.520 +I will say that I think that that depends +in different contexts. + +00:26:18.520 --> 00:26:19.680 +You might want to share + +00:26:19.680 --> 00:26:23.800 +different parts of your identity +if it's important to you and you might. + +00:26:24.240 --> 00:26:29.040 +And so I think that even things +that give like end users agency + +00:26:29.040 --> 00:26:33.720 +might have a lot of subtlety and how +they would be applied in different cases. + +00:26:34.640 --> 00:26:37.800 +And I like the idea, +you know, of describing, + +00:26:37.800 --> 00:26:39.560 +you know, aspects of appearance. + +00:26:39.560 --> 00:26:42.520 +I think you're missing one +one challenge with that is + +00:26:42.520 --> 00:26:43.920 +you might be sort of trading off + +00:26:43.920 --> 00:26:47.280 +between these like aspects of appearance +that you're describing and + +00:26:48.080 --> 00:26:51.520 +and the efficiency +with which someone can like + +00:26:51.720 --> 00:26:52.760 +maybe they're not going to get + +00:26:52.760 --> 00:26:56.280 +the information as quickly as a sighted +person would perceiving that person. + +00:26:56.760 --> 00:27:00.040 +And just because, +you know, audio occurs over time. + +00:27:00.040 --> 00:27:04.200 +So so I think there's it's an extremely +difficult, difficult challenge. + +00:27:05.000 --> 00:27:07.280 +And and in some cases it can matter. + +00:27:07.280 --> 00:27:10.920 +Like I can imagine, you know, +seeing a photograph of the leadership + +00:27:10.920 --> 00:27:14.080 +of a company, +you might want to know some some quick + +00:27:15.360 --> 00:27:18.120 +details about about the demographics +of who's who's leading it. + +00:27:18.120 --> 00:27:22.680 +For instance, +one one thing that I've noticed that + +00:27:23.240 --> 00:27:26.160 +is is sort of related to +this is that, you know, + +00:27:26.280 --> 00:27:29.640 +when I'm when I am asking. + +00:27:29.640 --> 00:27:31.200 +So I sometimes, + +00:27:31.200 --> 00:27:35.280 +you know, have people describe videos +and there can be a lot of differences + +00:27:35.280 --> 00:27:38.360 +in which aspects, even if they're going +to describe the aspects of someone's appearance + +00:27:39.080 --> 00:27:42.360 +the way they describe those based on +who is in front of them + +00:27:42.720 --> 00:27:45.880 +can also differ based on biases +that people have. + +00:27:45.880 --> 00:27:49.680 +So if people see a woman, +they might describe her differently + +00:27:49.680 --> 00:27:50.760 +than they would describe a man. + +00:27:50.760 --> 00:27:54.360 +They might focus on different +aspects of of appearance. + +00:27:54.360 --> 00:27:58.320 +And so I think even things that go towards +describing aspects of appearance + +00:27:58.320 --> 00:28:02.280 +will have to be, you know, very carefully, +very carefully designed. + +00:28:02.280 --> 00:28:05.680 +And it really feels like a challenging +a challenging problem. + +00:28:05.720 --> 00:28:08.040 +Yeah. + +00:28:08.040 --> 00:28:11.080 +Thank you so much, Amy. + +00:28:11.080 --> 00:28:14.200 +Michael, any thoughts on this? 
+ +00:28:14.200 --> 00:28:15.680 +And I would + +00:28:16.680 --> 00:28:18.200 +add something + +00:28:18.200 --> 00:28:21.920 +here, especially for you, ... + +00:28:22.160 --> 00:28:26.600 +do you see any future role +in accessibility guidelines + +00:28:27.600 --> 00:28:29.760 +in contributing to + +00:28:31.560 --> 00:28:34.320 +preventing bias in machine + +00:28:34.320 --> 00:28:37.880 +learning, generated descriptions +or whatever + +00:28:38.400 --> 00:28:43.840 +that results from these models? + +00:28:43.840 --> 00:28:46.040 +I know my answer to that question. + +00:28:46.040 --> 00:28:49.640 +It could be longer +than my prepared answers. + +00:28:49.640 --> 00:28:53.040 +So let's see where we go. + +00:28:53.040 --> 00:28:56.760 +I just want to add a couple of thoughts +to what the others have been saying. + +00:28:57.400 --> 00:28:59.880 +I want to first to categorize bias + +00:29:00.920 --> 00:29:04.240 +as we're talking +so far mainly about bias and recognition. + +00:29:04.240 --> 00:29:08.360 +You know, this... are there biases +of how machine learning recognizes + +00:29:08.360 --> 00:29:10.680 +objects, people, etc., contexts + +00:29:12.240 --> 00:29:17.160 +in that, one thing that magnifies +the challenge and accessibility context + +00:29:17.160 --> 00:29:20.600 +is that the sample size of people +with disabilities + +00:29:20.600 --> 00:29:23.160 +can be smaller in various training sets. + +00:29:24.120 --> 00:29:25.440 +And so there is a risk + +00:29:26.600 --> 00:29:29.040 +that images of people with + +00:29:29.040 --> 00:29:34.440 +disabilities on a training set or contexts +that are important for them, like wheelchair ramps + +00:29:34.440 --> 00:29:38.400 +or something will be excluded as outliers + +00:29:38.400 --> 00:29:43.440 +or will be less +well recognizable by the AI + +00:29:43.440 --> 00:29:46.040 +than, you know, +images of other people are. + +00:29:46.760 --> 00:29:49.600 +So, you know, that's just another + +00:29:50.920 --> 00:29:55.360 +another dimension to the aspects of bias +that we need to look at. + +00:29:55.720 --> 00:30:00.200 +But then we also need to look at the own +bias in the application of this. + +00:30:00.920 --> 00:30:03.400 +You know, we've talked a few times +during the session + +00:30:03.400 --> 00:30:07.520 +about the risk of relying on machine +generated + +00:30:08.640 --> 00:30:11.320 +descriptions, captions +as being good enough, + +00:30:12.520 --> 00:30:15.520 +whereas content +that has more of a mainstream + +00:30:15.520 --> 00:30:18.440 +audience might also have captions +as descriptions. + +00:30:18.440 --> 00:30:22.680 +But get more curated, +you know, quality assurance. + +00:30:23.280 --> 00:30:27.760 +So, you know, +that kind of bias could creep in and + +00:30:28.920 --> 00:30:29.440 +that can + +00:30:29.440 --> 00:30:32.400 +magnify the impact on disability bias, + +00:30:33.360 --> 00:30:37.640 +you know, because it can cause people +to be excluded from the fora + +00:30:38.040 --> 00:30:42.400 +that, often, which people are recruited +to be part of training sets, etc.. + +00:30:42.720 --> 00:30:45.960 +So, you know, again, the ethical +principles from where machine learning + +00:30:46.240 --> 00:30:47.040 +speaks to that. + +00:30:47.040 --> 00:30:50.480 +And I think that we may by identifying +some content + +00:30:50.480 --> 00:30:52.680 +that we need to add to that. 
+ +00:30:53.960 --> 00:30:58.560 +So moving on to what we can do about that, +you know, + +00:30:58.560 --> 00:31:02.280 +I do believe that it's within the scope +of the Web accessibility initiative + +00:31:02.560 --> 00:31:05.640 +or the W3C to provide guidance + +00:31:05.640 --> 00:31:10.680 +in some form +about how AI and accessibility + +00:31:10.680 --> 00:31:13.320 +should work together, +addressing many of these things. + +00:31:14.640 --> 00:31:16.600 +You know, typically this sort of thing + +00:31:16.600 --> 00:31:19.800 +would be a working group +node, which means that it's a + +00:31:21.000 --> 00:31:23.000 +it is a a formal document + +00:31:23.520 --> 00:31:27.040 +published by the W3C +that's had a certain level of review. + +00:31:27.600 --> 00:31:30.120 +There's even opportunities for versions + +00:31:30.120 --> 00:31:32.880 +that have had more review and signoff. + +00:31:33.480 --> 00:31:35.680 +So I think that's one thing +we might like to do. + +00:31:36.320 --> 00:31:39.000 +I'll also talk briefly about the work +that we're doing + +00:31:39.000 --> 00:31:42.000 +on the Web content accessibility guidelines + +00:31:42.000 --> 00:31:45.200 +3.0 sorry, the W3C accessibility + +00:31:45.200 --> 00:31:48.520 +guidelines 3 + +00:31:48.520 --> 00:31:50.600 +or WCAG 3. + +00:31:50.600 --> 00:31:54.360 +We it's a it's +a substantial re-envisioning + +00:31:54.360 --> 00:31:59.000 +and it's been a core requirement + +00:31:59.000 --> 00:32:01.680 +from the beginning +that we wanted to address, you know, + +00:32:03.000 --> 00:32:06.120 +addressed equity in the approach, +in the guidelines. + +00:32:06.120 --> 00:32:09.280 +How are we going to make sure that they're +equitable to people with disabilities? + +00:32:09.520 --> 00:32:13.120 +We've been exploring that in specific ways +and within the working group, + +00:32:13.960 --> 00:32:16.480 +really unpacking that to understand, + +00:32:16.640 --> 00:32:19.560 +you know, the relationship of equity +and accessibility and bias. + +00:32:19.560 --> 00:32:20.800 +And in the other dimension. + +00:32:20.800 --> 00:32:25.360 +So that's turning, you know, +we're connecting that with other work + +00:32:25.360 --> 00:32:31.480 +W3C has been doing to make itself +a more equitable organization. + +00:32:31.480 --> 00:32:36.120 +And so, you know, this is to say +that I believe WCAG 3, + +00:32:36.560 --> 00:32:40.120 +will also have some structure +built in and support resources, + +00:32:40.120 --> 00:32:43.920 +addressing issues of bias specifically. + +00:32:45.040 --> 00:32:47.320 +Now, these are + +00:32:47.480 --> 00:32:50.880 +hopes, not promises, but you know, + +00:32:51.480 --> 00:32:54.240 +that's the direction from activities +like this. + +00:32:56.280 --> 00:32:57.120 +Thank you so much. + +00:32:57.120 --> 00:33:01.200 +And yes, those are really +some exciting avenues that we + +00:33:01.440 --> 00:33:05.400 +we hope that can come to fruition +in the near future. + +00:33:06.360 --> 00:33:08.640 +So I guess final question + +00:33:08.640 --> 00:33:12.800 +for everyone, and it is + +00:33:13.640 --> 00:33:18.080 +I would like to know a bit +about your future perspectives on the use + +00:33:18.080 --> 00:33:23.800 +of natural language processing for the field +or in the field of accessibility. + +00:33:24.360 --> 00:33:27.200 +And I'll start with you this time, Amy. + +00:33:30.920 --> 00:33:33.680 +Yeah, +So I think this is a really exciting area. 
+ +00:33:33.680 --> 00:33:39.200 +And one thing, one one shift +I've found recently among people in NLP + +00:33:39.240 --> 00:33:42.960 +who I talked to is that, you know, as +the models are getting better + +00:33:42.960 --> 00:33:47.880 +at just creating like fluent, fluent text +that looks reasonable, + +00:33:48.240 --> 00:33:49.120 +that lot of people + +00:33:49.120 --> 00:33:52.920 +are becoming more interested in +what are the actual applications of this + +00:33:52.920 --> 00:33:56.560 +and how can we build tools +that actually support those applications + +00:33:56.720 --> 00:33:59.440 +rather than relying +on, you know, automated metrics for, + +00:34:00.360 --> 00:34:03.520 +but that might not, +you know, capture people's experiences. + +00:34:03.520 --> 00:34:08.520 +So I wanted to to note +that that's a direction I found exciting. + +00:34:08.520 --> 00:34:12.800 +So I guess a couple a couple of things +I think could be promising are + +00:34:13.200 --> 00:34:17.600 +and I've kind of mentioned them before +in my other in my other responses, But, + +00:34:17.960 --> 00:34:23.040 +you know, as gain the ability to describe +more and more about the image, I, + +00:34:23.240 --> 00:34:28.680 +I think that NLP can provide +a really good opportunity to personalize + +00:34:29.040 --> 00:34:33.480 +those descriptions based on the person +and what they want as well as the context + +00:34:33.680 --> 00:34:34.080 +there is. + +00:34:34.080 --> 00:34:35.760 +You know, +if you think about walking in a room, + +00:34:35.760 --> 00:34:38.720 +there's like so much +you could possibly describe. + +00:34:38.720 --> 00:34:41.720 +If we can make it easier for people +to get the information that they're + +00:34:41.720 --> 00:34:46.360 +looking for quickly +from their media, that that would be a + +00:34:47.560 --> 00:34:48.840 +a great improvement. + +00:34:48.840 --> 00:34:52.320 +You know, combining computer vision +to recognize things + +00:34:52.320 --> 00:34:56.640 +in the underlying image +and using something like NLP to + +00:34:57.520 --> 00:35:01.680 +to summarize that description +I think is is promising and exciting. + +00:35:02.120 --> 00:35:04.120 +And one other way +I think I'm excited about + +00:35:04.120 --> 00:35:09.080 +it is in its opportunities to maybe help +people with their own description tasks. + +00:35:09.080 --> 00:35:12.600 +So when we have humans +working on descriptions, it's really hard. + +00:35:13.400 --> 00:35:16.360 +So, you know, novices +sometimes have a hard time remembering + +00:35:16.360 --> 00:35:18.560 +and applying the guidelines that exist. + +00:35:18.560 --> 00:35:23.280 +You know, maybe we could rewrite people's +descriptions of videos to be more in line + +00:35:23.280 --> 00:35:24.760 +with how an expert would write them + +00:35:24.760 --> 00:35:28.560 +by making them more concise +or changing the grammar a bit + +00:35:28.560 --> 00:35:32.640 +so that it fits what people are expecting +from their guidelines. + +00:35:32.640 --> 00:35:36.480 +Or we might alert people +to aspects of their own descriptions + +00:35:36.480 --> 00:35:39.920 +that that might need to +that could be changed a little bit + +00:35:39.920 --> 00:35:44.480 +to perhaps reduce something like bias +that they have in the description. 
+ +00:35:44.480 --> 00:35:47.280 +So I think there's there's really lots +of exciting opportunities + +00:35:47.280 --> 00:35:50.680 +in terms of authoring descriptions +as well as making those end descriptions + +00:35:50.680 --> 00:35:51.640 +a little bit better. Yeah. + +00:35:53.280 --> 00:35:53.760 +Great. + +00:35:53.760 --> 00:35:54.120 +Yeah. + +00:35:54.120 --> 00:35:58.120 +Thanks a lot. Shivam? + +00:35:58.120 --> 00:35:59.800 +Yeah, so + +00:36:00.080 --> 00:36:04.200 +I see it a bit of more it now +rather than earlier, + +00:36:04.600 --> 00:36:07.040 +because now the models, +the engines are quite advanced + +00:36:08.040 --> 00:36:10.040 +so I see a good context + +00:36:10.040 --> 00:36:13.960 +aware solution that gives you faster +processing some efficient data. + +00:36:13.960 --> 00:36:14.440 +Right. + +00:36:14.440 --> 00:36:17.840 +And that works on text, video +and as well as audio. + +00:36:17.840 --> 00:36:20.400 +So I see this happening to be a reality. + +00:36:21.360 --> 00:36:23.400 +A good use case +I would have been following up + +00:36:23.400 --> 00:36:27.280 +also is how we can make +the academic textbooks. + +00:36:27.280 --> 00:36:29.920 +And we have academic +assignments, right? + +00:36:29.920 --> 00:36:33.600 +There are multiple charts, +bar chart graphs, all associated data. + +00:36:33.920 --> 00:36:38.360 +If some of these models or technologies +can create better + +00:36:39.120 --> 00:36:42.120 +understanding of those things, +it would help a lot of + +00:36:43.160 --> 00:36:46.960 +people in understanding that +we have difficulty just by reading it. + +00:36:46.960 --> 00:36:50.400 +Or maybe in absence of good quality + +00:36:50.680 --> 00:36:53.320 +descriptions of these charts, bars +and all those things. + +00:36:53.600 --> 00:36:55.840 +I see this happening in the next +few years. + +00:36:56.320 --> 00:36:58.720 +A better description of the generation. + +00:36:59.080 --> 00:37:02.400 +And as a closing comment, I would say +there are different types of consumers + +00:37:02.760 --> 00:37:05.880 +of media that some can easily read +but not comprehend. + +00:37:05.880 --> 00:37:09.440 +Some comprehend easily, +but have difficulty consuming, + +00:37:09.720 --> 00:37:11.200 +consuming it visually. + +00:37:11.200 --> 00:37:16.080 +Now in that sense, the coming NLP +technologies would help designers + +00:37:16.080 --> 00:37:19.920 +have contextual description of outputs +and that I would see in simple terms. + +00:37:20.280 --> 00:37:25.000 +If you give me a simple, efficient faster +output of a media and it's correct, + +00:37:25.000 --> 00:37:29.080 +then it will be the pinnacle +of what I see as the NLP. + +00:37:29.360 --> 00:37:32.440 +And these are for natural language +processing understanding + +00:37:32.560 --> 00:37:37.200 +as well as generation +for all key technologies. + +00:37:37.200 --> 00:37:38.200 +Thank you so much. + +00:37:38.200 --> 00:37:40.560 +It's exciting times ahead. Definitely. + +00:37:41.400 --> 00:37:43.680 +Michael, you want to share your vision. + +00:37:46.120 --> 00:37:49.920 +So based on my knowledge of + +00:37:50.560 --> 00:37:52.800 +how machine learning +in the present day works, + +00:37:54.360 --> 00:37:56.720 +you know, the tools tend to be more + +00:37:56.720 --> 00:38:00.120 +focused on specific abilities, + +00:38:00.120 --> 00:38:02.760 +which means that the context is a bit +isolated. 
+ +00:38:03.680 --> 00:38:09.840 +So I think I'm speaking as a as a person +working the field, + +00:38:09.840 --> 00:38:13.600 +identifying a need rather than something +that may necessarily be a technological + +00:38:15.360 --> 00:38:16.640 +potential. + +00:38:16.640 --> 00:38:20.400 +But the Internet of Things used as APIs + +00:38:20.400 --> 00:38:23.520 +to exchange data +between different types of devices. + +00:38:24.160 --> 00:38:27.640 +And if we could model some structure +like that so that these tools + +00:38:27.640 --> 00:38:32.280 +could share contexts with each other +and negotiate a better group description, + +00:38:32.520 --> 00:38:35.800 +I think that that might be an opportunity +for an early + +00:38:37.160 --> 00:38:39.960 +evolution of this field. + +00:38:39.960 --> 00:38:41.560 +You know, the long term, of course, + +00:38:41.560 --> 00:38:45.920 +I think tools will emerge +with greater sense of context built in, + +00:38:46.960 --> 00:38:49.400 +but that'll probably be, you know, another + +00:38:49.400 --> 00:38:52.400 +tier slash singularity or whatever. + +00:38:52.960 --> 00:38:56.520 +So yeah, that's my view on that near term future +based on my knowledge. + +00:38:57.800 --> 00:38:58.520 +Yeah. + +00:38:58.920 --> 00:39:01.400 +Good, good suggestions too to look at + +00:39:01.400 --> 00:39:04.760 +also. And Shaomei? + +00:39:04.760 --> 00:39:05.600 +Yeah. + +00:39:05.600 --> 00:39:08.120 +So yeah. So + +00:39:08.720 --> 00:39:10.680 +looking into the future + +00:39:10.680 --> 00:39:13.760 +I can see kind of two + +00:39:16.680 --> 00:39:22.080 +areas + +00:39:22.080 --> 00:39:26.720 +that I think +will have a lot of potentials. + +00:39:26.760 --> 00:39:28.960 +And the first one it's from the + +00:39:30.840 --> 00:39:32.480 +technology + +00:39:33.240 --> 00:39:39.400 +perspective +which I agree with Michael that I can see + +00:39:39.760 --> 00:39:42.480 +a lot of gain in kind of + +00:39:44.520 --> 00:39:47.640 +incorporating the context + +00:39:47.640 --> 00:39:50.040 +surrounding photos + +00:39:50.040 --> 00:39:53.720 +and by you know like taking advantage + +00:39:53.720 --> 00:39:56.480 +of the reason and, + +00:39:57.600 --> 00:39:59.800 +and progressing + +00:40:00.800 --> 00:40:05.680 +and deep learning models +that kind of have + +00:40:05.680 --> 00:40:08.160 +what kind of math models are + +00:40:10.080 --> 00:40:14.720 +representations space +So you know like we can embed both + +00:40:14.800 --> 00:40:18.680 +the kind of image +as well as the kind of tags + +00:40:19.680 --> 00:40:21.840 +surrounding it and then + +00:40:21.840 --> 00:40:24.040 +and then add a kind of + +00:40:24.320 --> 00:40:27.760 +metadata such as the author or the time + +00:40:27.760 --> 00:40:29.440 +when, you know, + +00:40:29.880 --> 00:40:35.520 +when the photo was taken, +all kind of posted. + +00:40:35.520 --> 00:40:39.360 +So, you know, a lot of those +can be kind of drawing in a kind of big + +00:40:40.040 --> 00:40:43.920 +represent patient space that kind of that + +00:40:44.960 --> 00:40:47.640 +provides a lot more than just kind of + +00:40:49.040 --> 00:40:50.400 +visual + +00:40:51.520 --> 00:40:53.800 +information alone. + +00:40:53.800 --> 00:40:57.760 +So I think that's a kind of big + +00:40:57.840 --> 00:40:59.840 +technology break + +00:40:59.840 --> 00:41:03.520 +through that we can see +in the kind of near-term future. 
+ +00:41:03.960 --> 00:41:08.960 +But the kind of second thing +I think and more important to me + +00:41:08.960 --> 00:41:12.360 +is the kind of use case + +00:41:12.920 --> 00:41:15.000 +perspectives. + +00:41:15.000 --> 00:41:18.440 +I think right now +when we think about all kind of talk + +00:41:18.440 --> 00:41:19.320 +about + +00:41:22.160 --> 00:41:24.360 +the media + +00:41:25.480 --> 00:41:27.520 +accessibility + +00:41:27.520 --> 00:41:30.920 +we are mostly kind of think +about our consumption case, + +00:41:31.200 --> 00:41:34.720 +like how do I help some people +who can not see to kind of + +00:41:35.640 --> 00:41:40.560 +to kind of consume photos that posted by + +00:41:42.000 --> 00:41:45.720 +others +and mostly by kind of sighted folks. + +00:41:45.720 --> 00:41:49.920 +But I think it's equally important +but largely kind of + +00:41:51.720 --> 00:41:54.000 +overlook is, + +00:41:54.000 --> 00:41:56.520 +is these kind of media + +00:41:57.600 --> 00:42:02.760 +creation + +00:42:03.680 --> 00:42:06.440 +use cases, you know, like +how can we support + +00:42:07.680 --> 00:42:10.000 +people with visual + +00:42:10.320 --> 00:42:12.960 +impairment to kind of + +00:42:13.600 --> 00:42:19.080 +create and kind of share photos and videos + +00:42:20.440 --> 00:42:22.800 +in my own work into, you know, + +00:42:22.960 --> 00:42:26.000 +these use cases, which is why you know + +00:42:27.240 --> 00:42:30.000 +there's like such a gap in what +the kind of + +00:42:31.800 --> 00:42:34.120 +current technology can do. + +00:42:34.120 --> 00:42:37.360 +For example one like all a kind of modern + +00:42:38.520 --> 00:42:41.720 +AI models really failed + +00:42:41.720 --> 00:42:45.120 +when it came to processing photos + +00:42:46.280 --> 00:42:52.200 +taken by people +with visual impairments + +00:42:53.320 --> 00:42:56.840 +because they are +just not the same kind of photo that are used to + +00:42:56.880 --> 00:43:00.080 +train those + +00:43:00.080 --> 00:43:01.360 +models. + +00:43:01.360 --> 00:43:06.000 +So, you know, there's a huge gap in +what kind of current like the kind of + +00:43:07.240 --> 00:43:09.080 +fundamentals of + +00:43:09.080 --> 00:43:11.760 +those models and then what they can do. + +00:43:12.000 --> 00:43:15.840 +And then second is +there is a lot need for more + +00:43:17.640 --> 00:43:20.560 +personalized and + +00:43:20.920 --> 00:43:24.600 +and aesthetic needs. + +00:43:24.920 --> 00:43:27.920 +Right after I take ten + +00:43:28.840 --> 00:43:31.960 +selfies, I wanna find out why + +00:43:32.200 --> 00:43:35.800 +I wanna post that kind of share who I am + +00:43:36.440 --> 00:43:40.320 +and that it's or +it's like we cannot do at all. + +00:43:40.320 --> 00:43:46.080 +We can, you know we can kind of tell +you're like, okay, you'll have ten + +00:43:47.120 --> 00:43:50.680 +photos +and are kind of containing your face, + +00:43:51.600 --> 00:43:53.760 +but you know but, but, + +00:43:54.400 --> 00:43:59.880 +but then how like, +how can we change kind of the models + +00:43:59.880 --> 00:44:04.400 +that can really represent somebody's +space and then, you know, + +00:44:04.440 --> 00:44:09.240 +somebody’s kind of aesthetics +and I think that's another interesting + +00:44:10.560 --> 00:44:11.840 +future + +00:44:12.280 --> 00:44:14.640 +development that I want to see. + +00:44:15.680 --> 00:44:17.840 +So that's all. + +00:44:17.840 --> 00:44:19.400 +Thank you so much, Shaomei. + +00:44:19.400 --> 00:44:23.960 +And I think we +we only have 4 minutes more. 
+ +00:44:23.960 --> 00:44:26.800 +So I won’t risk another question + +00:44:27.520 --> 00:44:31.160 +because we need to +to to end at the top of the hour. + +00:44:31.560 --> 00:44:36.240 +And so I will take the opportunity +to once again, thanks + +00:44:36.680 --> 00:44:42.360 +thank our our panelists and I hope +everyone enjoyed it as much as I did. + +00:44:42.360 --> 00:44:43.080 +And it was + +00:44:44.280 --> 00:44:45.480 +really interesting + +00:44:45.480 --> 00:44:51.880 +and very, very optimistic perspectives +so that + +00:44:53.240 --> 00:44:56.600 +we can see that's not just the, uh, the, + +00:44:56.960 --> 00:45:00.200 +um, risky, uh, + +00:45:00.360 --> 00:45:05.400 +or risk enabling, uh, outputs that A.I. + +00:45:05.400 --> 00:45:06.040 +can have. + +00:45:06.040 --> 00:45:09.160 +So it's nice to, +to have these perspectives. + +00:45:09.160 --> 00:45:10.600 +So thank you once again. + +00:45:10.600 --> 00:45:15.720 +So Shaomei, Shivam, Amy and Michael, +it was brilliant to have you here, + +00:45:17.160 --> 00:45:19.880 +and thanks + +00:45:19.880 --> 00:45:21.200 +who attended. + diff --git a/pages/about/projects/wai-coop/symposium2-captions/where_next.vtt b/pages/about/projects/wai-coop/symposium2-captions/where_next.vtt new file mode 100644 index 00000000000..32723713ed7 --- /dev/null +++ b/pages/about/projects/wai-coop/symposium2-captions/where_next.vtt @@ -0,0 +1,3416 @@ +WEBVTT +Kind: captions +Language: en + +00:00:00.000 --> 00:00:03.040 +I'm going to move on to the + +00:00:03.040 --> 00:00:05.240 +to introducing Shari Trewin + +00:00:05.240 --> 00:00:09.000 +and she is an engineering manager +at Google + +00:00:09.000 --> 00:00:13.520 +and leading a team +that develops assistive technologies. + +00:00:13.520 --> 00:00:16.760 +So I'm really looking forward + +00:00:17.240 --> 00:00:19.800 +to your vision of and + +00:00:20.960 --> 00:00:22.960 +how what's next? + +00:00:22.960 --> 00:00:26.960 +What's the future holding for us +in assistive AI. + +00:00:27.760 --> 00:00:30.080 +So as we had yesterday + +00:00:31.400 --> 00:00:34.040 +at the end of the keynote Jutta + +00:00:35.120 --> 00:00:37.200 +will join us and we'll have this + +00:00:38.600 --> 00:00:42.200 +even more interesting conversation +between Shari Trewin + +00:00:42.720 --> 00:00:46.160 +making it really appetizing for the keynote. + +00:00:46.160 --> 00:00:51.320 +So Shari the floor is yours. + +00:00:51.320 --> 00:00:54.120 +All right. Thank you very much, Carlos. + +00:00:55.040 --> 00:01:09.720 +I just. + +00:01:09.720 --> 00:01:12.080 +Okay. Can you hear me okay? + +00:01:12.080 --> 00:01:14.640 +Yes. Good. + +00:01:14.640 --> 00:01:15.440 +All right. + +00:01:15.440 --> 00:01:20.120 +What a pleasure +it is to participate in this symposium and + +00:01:21.200 --> 00:01:23.240 +hear from our opening keynote, Jutta, + +00:01:23.800 --> 00:01:27.240 +and all our panelists +over the last two days. + +00:01:27.440 --> 00:01:29.240 +Thank you so much for inviting me. + +00:01:29.240 --> 00:01:31.880 +It's my privilege to finish us up now. + +00:01:33.240 --> 00:01:35.960 +So yesterday Jutta grounded us + +00:01:35.960 --> 00:01:39.720 +all in the need to do no harm and talked +about some of the ways + +00:01:39.720 --> 00:01:42.400 +we can think about detecting +and avoiding harm. 
+ +00:01:42.760 --> 00:01:47.120 +Today, I'm going to focus on digital +accessibility applications + +00:01:47.120 --> 00:01:50.880 +of AI in general and ask where next + +00:01:50.880 --> 00:01:57.040 +for assistive AI? + +00:01:57.040 --> 00:01:59.240 +I You see my screen? + +00:02:00.280 --> 00:02:02.360 +Yes, we are. You are. Okay. + +00:02:02.360 --> 00:02:08.160 +I just didn't show mine all good. + +00:02:08.160 --> 00:02:10.280 +So my name is Shari Trewin. + +00:02:10.280 --> 00:02:14.040 +I'm an engineering manager in Google's +accessibility team. + +00:02:14.320 --> 00:02:18.480 +I'm also the past chair of +the ACM's SIGAccess + +00:02:18.680 --> 00:02:22.680 +Special Interest Group +on accessible computing. + +00:02:23.440 --> 00:02:28.720 +My background is computer science and AI, +and I've been thinking about the ways + +00:02:28.720 --> 00:02:32.440 +that AI plays into accessibility +for many years. + +00:02:33.120 --> 00:02:37.040 +Much of my work in thinking on AI +and the AI fairness was done + +00:02:37.040 --> 00:02:41.560 +when I worked at IBM, where I was program +director for IBM Accessibility. + +00:02:41.840 --> 00:02:44.680 +So shout out to any +IBM friends in the audience. + +00:02:46.040 --> 00:02:46.800 +At Google, + +00:02:46.800 --> 00:02:51.120 +my team has a focus on developing +new assistive capabilities + +00:02:51.480 --> 00:02:54.920 +and as we've been discussing +for the last few days, + +00:02:54.920 --> 00:02:57.440 +AI has an important role to play. + +00:02:59.280 --> 00:03:02.040 +So there's been a lot of buzz in the news +lately. + +00:03:02.040 --> 00:03:06.640 +Both excitement and alarm +about generative AI, + +00:03:06.680 --> 00:03:09.040 +especially these large language models. + +00:03:10.520 --> 00:03:13.680 +So for example, the ChatGPT model from OpenAI + +00:03:13.680 --> 00:03:17.200 +has been in the news quite a bit, + +00:03:17.200 --> 00:03:19.800 +in case you haven't played with it +yet, here's an example. + +00:03:19.800 --> 00:03:26.560 +So I asked ChatGPT how will AI change +digital accessibility? + +00:03:26.960 --> 00:03:30.280 +Let's try to get it to write +my talk for me. + +00:03:30.280 --> 00:03:33.680 +And it responded +with a pretty positive viewpoint. + +00:03:33.680 --> 00:03:36.880 +It said AI has the potential +to significantly + +00:03:36.880 --> 00:03:40.120 +improve digital accessibility +for people with disabilities. + +00:03:40.520 --> 00:03:44.000 +Here's a few ways +that AI could contribute to this goal. + +00:03:45.760 --> 00:03:46.520 +It went on to + +00:03:46.520 --> 00:03:49.800 +list four examples of transformative AI. + +00:03:49.840 --> 00:03:54.400 +All of these have been major topics +at this symposium, and for each one + +00:03:54.400 --> 00:03:58.200 +it gave a one or two sentence +explanation of what it was + +00:03:58.400 --> 00:04:02.640 +and who it's helpful for. + +00:04:02.640 --> 00:04:06.640 +Finally, +it concluded that AI has the potential + +00:04:06.640 --> 00:04:10.640 +to make digital content +and devices more accessible to people + +00:04:10.640 --> 00:04:14.360 +with disabilities, allowing them +to fully participate in the digital world. + +00:04:15.320 --> 00:04:17.440 +It seems pretty convincing +and well written. + +00:04:17.960 --> 00:04:22.520 +Perhaps I should just end here +and let AI have the last word. 
But, + +00:04:23.960 --> 00:04:26.240 +you know, it's +kind of it's kind of mind blowing, + +00:04:26.360 --> 00:04:31.960 +although it was pretty terrible jokes +and this is what it can do + +00:04:31.960 --> 00:04:35.880 +without explicitly +being connected to any source of truth. + +00:04:36.000 --> 00:04:38.840 +But it does get things +sometimes flat out wrong. + +00:04:39.200 --> 00:04:43.760 +And there's a risk of bias in the training +data being reflected in the predictions. + +00:04:44.720 --> 00:04:46.000 +And this limits the + +00:04:46.000 --> 00:04:48.760 +ways that +we can apply this technology today. + +00:04:49.000 --> 00:04:51.800 +But it also gives us a glimpse +into the future. + +00:04:52.640 --> 00:04:55.080 +I'm not going to take medical advice + +00:04:55.080 --> 00:04:59.560 +from a generative AI model yet, +but as we get better + +00:04:59.600 --> 00:05:04.400 +at connecting this level +of language fluency with knowledge, + +00:05:05.240 --> 00:05:08.480 +improving accuracy, +detecting and removing bias, + +00:05:09.040 --> 00:05:13.720 +this opens up so many new possibilities +for interaction models + +00:05:14.040 --> 00:05:17.680 +and ways to find to consume +information in the future. + +00:05:18.800 --> 00:05:25.160 +So I'll come back to that later. + +00:05:25.160 --> 00:05:26.720 +For today's talk, I'm going to + +00:05:26.720 --> 00:05:28.880 +slice the topic a little bit differently. + +00:05:28.920 --> 00:05:34.040 +I want to focus on +some of the general research directions + +00:05:34.040 --> 00:05:37.160 +that I see as being important +for moving digital + +00:05:37.160 --> 00:05:39.320 +accessibility forward with AI. + +00:05:40.160 --> 00:05:43.520 +So in our opening keynote, Jutta laid out + +00:05:43.560 --> 00:05:46.520 +some of the risks +that can be associated with AI. + +00:05:46.560 --> 00:05:49.800 +It's not created and applied with equity + +00:05:50.120 --> 00:05:53.480 +and safety in mind, and it's important + +00:05:53.480 --> 00:05:57.440 +to keep these considerations +in mind as we move forward with A.I. + +00:05:58.520 --> 00:05:59.640 +When the benefits of + +00:05:59.640 --> 00:06:03.040 +AI do outweigh the risks in enabling +digital access, + +00:06:03.400 --> 00:06:07.200 +we still have a way to go in +making these benefits + +00:06:07.200 --> 00:06:10.520 +available to everyone, in fact, +to make them accessible + +00:06:10.960 --> 00:06:14.760 +so start +by talking about some current efforts + +00:06:15.200 --> 00:06:17.640 +in the direction making assistive + +00:06:17.800 --> 00:06:21.280 +AI itself more inclusive. + +00:06:21.280 --> 00:06:24.520 +The second topic I want to cover is where + +00:06:24.520 --> 00:06:27.800 +we choose to apply AI. + +00:06:28.640 --> 00:06:30.920 +Focusing on why I called it + +00:06:31.200 --> 00:06:35.040 +AI at source and find web + +00:06:35.040 --> 00:06:37.840 +accessibility work in role emphasizes + +00:06:39.200 --> 00:06:43.480 +the need to shift left that is, +to bake accessibility + +00:06:43.480 --> 00:06:47.960 +in as early as possible in the development +of a digital experience. + +00:06:48.160 --> 00:06:51.920 +So I'll discuss some of the places +where AI can help with that shift + +00:06:51.920 --> 00:06:56.760 +left and highlight both opportunities +and important emerging challenges + +00:06:57.960 --> 00:07:04.600 +that we have for web accessibility. 
+ +00:07:04.600 --> 00:07:08.240 +So we know the AI has already changed + +00:07:08.240 --> 00:07:11.360 +the landscape of assistive technology. + +00:07:11.360 --> 00:07:16.880 +So one research direction is how +do we make these AI models more inclusive? + +00:07:18.520 --> 00:07:19.360 +And I want to + +00:07:19.360 --> 00:07:21.680 +start with a little story about captions. + +00:07:22.440 --> 00:07:25.200 +In 2020, I was accessibility chair + +00:07:25.200 --> 00:07:28.200 +for a very large virtual conference. + +00:07:29.520 --> 00:07:32.000 +We provided a human captioner + +00:07:32.280 --> 00:07:37.040 +who was live transcribing the sessions +in a in a separate live feed. + +00:07:37.880 --> 00:07:41.560 +So I'm showing an image of a slide +from a presentation + +00:07:41.560 --> 00:07:44.200 +here with a transcription +window to the right. + +00:07:45.560 --> 00:07:49.520 +I spoke with a hard of hearing attendee +during the conference + +00:07:50.160 --> 00:07:53.080 +who used captions to supplement +what he could hear, + +00:07:53.720 --> 00:07:56.560 +and he told me +while the live feed had quite a delay. + +00:07:57.080 --> 00:08:00.680 +So he was also using automated captions + +00:08:00.680 --> 00:08:04.920 +that were being streamed +through the conference provider. + +00:08:05.000 --> 00:08:09.080 +Let's add them to this view +highlighted in Green. + +00:08:09.080 --> 00:08:13.520 +So these had a little less delay, +but they had accuracy problems, + +00:08:13.840 --> 00:08:18.440 +especially for foreign speakers +or people with atypical speech, + +00:08:18.880 --> 00:08:21.840 +and especially for people's +names or technical terms. + +00:08:22.600 --> 00:08:25.760 +The important parts. + +00:08:25.760 --> 00:08:29.320 +So he also turned on +the automated captions in his browser, + +00:08:29.320 --> 00:08:32.040 +which used a different speech +to text engine. + +00:08:32.320 --> 00:08:34.360 +I've added those on the screen to + +00:08:36.200 --> 00:08:39.800 +and supplemented +that with an app on his phone + +00:08:40.160 --> 00:08:43.640 +using a third different speech +recognition engine, capturing + +00:08:43.640 --> 00:08:47.160 +the audio as it was played +from his computer and transcribing it. + +00:08:47.840 --> 00:08:51.360 +So that's four sources of captions +to read. + +00:08:52.080 --> 00:08:56.160 +None of them was perfect, +but he combined them to triangulate + +00:08:56.480 --> 00:08:59.280 +interpretations +where the transcripts seem to be wrong. + +00:09:00.920 --> 00:09:02.360 +So we could say AI + +00:09:02.360 --> 00:09:06.560 +powered captions were helping him +to access the conference, no doubt about it. + +00:09:06.560 --> 00:09:09.200 +But it wasn't a very usable experience. + +00:09:10.040 --> 00:09:13.680 +He was empowered, +but he also had a huge burden + +00:09:13.920 --> 00:09:16.600 +in managing his own accessibility. + +00:09:17.000 --> 00:09:19.520 +And there were still gaps, though, + +00:09:19.920 --> 00:09:24.960 +as Michael Cooper pointed out yesterday, +imperfect captions and descriptions + +00:09:25.280 --> 00:09:29.000 +can provide agency +but can also mislead users + +00:09:29.000 --> 00:09:33.440 +and and waste their time. + +00:09:33.440 --> 00:09:36.080 +I also want to point out +that this particular user + +00:09:36.080 --> 00:09:41.440 +was in a really privileged position +because he knows about all these services. 
+
+00:09:41.760 --> 00:09:45.360
+He has devices powerful enough to stream
+all these channels,
+
+00:09:45.640 --> 00:09:47.520
+has good Internet access.
+
+00:09:47.520 --> 00:09:50.720
+He has a smartphone,
+he has the cognitive ability
+
+00:09:50.720 --> 00:09:54.560
+to make sense of this incredible
+information overload.
+
+00:09:55.720 --> 00:09:57.760
+This really isn't equitable access.
+
+00:09:57.760 --> 00:09:58.280
+Right.
+
+00:09:58.520 --> 00:10:02.280
+And and the captions themselves weren't
+providing equitable
+
+00:10:02.280 --> 00:10:04.800
+representation of the conference speakers
+
+00:10:05.280 --> 00:10:09.080
+to those with atypical speech
+were at a disadvantage
+
+00:10:09.280 --> 00:10:11.840
+in having their message
+communicated clearly.
+
+00:10:12.640 --> 00:10:17.640
+So there's an important gap to be filled.
+
+00:10:17.640 --> 00:10:20.800
+One of the current limitations
+of automated captions is poor
+
+00:10:20.800 --> 00:10:23.520
+transcription of people
+with atypical speech,
+
+00:10:23.840 --> 00:10:27.360
+and especially when they're using
+technical or specialized language.
+
+00:10:28.480 --> 00:10:30.560
+So for example, Dimitri Kanevsky
+
+00:10:30.560 --> 00:10:32.840
+is a Google researcher and inventor.
+
+00:10:33.800 --> 00:10:38.680
+He's an expert in optimization
+and algebraic geometry, among many other
+
+00:10:38.680 --> 00:10:39.560
+topics.
+
+00:10:40.000 --> 00:10:43.920
+He's Russian and deaf,
+both of which affect his English speech.
+
+00:10:44.600 --> 00:10:54.280
+I'm going to play a short video
+clip of Dimitri.
+
+00:11:10.400 --> 00:11:12.320
+So, Dimitri said
+
+00:11:12.320 --> 00:11:15.440
+Google has very good
+general speech recognition,
+
+00:11:15.480 --> 00:11:19.200
+but if you do not sound like most people,
+it will not understand you.
+
+00:11:19.880 --> 00:11:24.440
+And on the screen a speech engine
+translated that last part of the sentence
+
+00:11:24.440 --> 00:11:28.040
+as. But if you look at most of people
+it will look
+
+00:11:28.040 --> 00:11:29.920
+and defend you.
+
+00:11:31.600 --> 00:11:33.160
+So, people
+
+00:11:33.160 --> 00:11:36.960
+with disabilities
+that impact speech such as cerebral palsy,
+
+00:11:37.040 --> 00:11:41.960
+stroke, Down's Syndrome,
+Parkinson's, ALS are also impacted
+
+00:11:41.960 --> 00:11:46.800
+by lack of access to speech recognition,
+whether it's for controlling a digital
+
+00:11:46.800 --> 00:11:50.720
+assistant, communicating with others,
+or creating accessible
+
+00:11:50.720 --> 00:11:56.280
+digital content. Oh,
+
+00:11:57.680 --> 00:12:07.160
+I want to go to the next slide.
+
+00:12:07.160 --> 00:12:11.840
+So Google's Project Euphonia
+set out to explore
+
+00:12:11.840 --> 00:12:16.280
+whether personalized speech recognition
+models can provide accurate speech
+
+00:12:16.280 --> 00:12:19.640
+recognition for people like Dimitri
+with atypical speech.
+
+00:12:20.280 --> 00:12:22.760
+And this is a great example of the way
+research
+
+00:12:22.960 --> 00:12:25.600
+can help
+to move the state of the art forward.
+
+00:12:25.600 --> 00:12:28.600
+So the first challenge, as many people
+
+00:12:28.600 --> 00:12:32.440
+have mentioned already
+today, was a lack of suitable speech data.
+
+00:12:33.440 --> 00:12:34.560
+Project Euphonia
+
+00:12:34.560 --> 00:12:37.520
+collected over a million utterances
+
+00:12:37.520 --> 00:12:40.240
+from individuals with speech impairments.
+ +00:12:41.040 --> 00:12:43.520 +And then the researchers built individual + +00:12:43.520 --> 00:12:47.640 +models for 432 people and compared them + +00:12:47.960 --> 00:12:50.200 +to state of the art general models. + +00:12:51.680 --> 00:12:56.440 +They found that the personalized models +could significantly reduce + +00:12:56.560 --> 00:13:01.080 +word error rates, and so the error rates +had gone from something like 31% + +00:13:01.080 --> 00:13:04.880 +with the general models down to 4.6%. + +00:13:05.720 --> 00:13:08.360 +So it's not just +a significant improvement, + +00:13:08.360 --> 00:13:10.160 +but it's enough of improvement. + +00:13:10.160 --> 00:13:16.160 +It gets to a high enough point +to make the technology practically useful. + +00:13:16.160 --> 00:13:19.800 +And in fact, they've even found +that these personalized models + +00:13:20.200 --> 00:13:23.960 +could sometimes perform +better than human transcribers for people + +00:13:23.960 --> 00:13:26.520 +with more severely disordered speech. + +00:13:30.560 --> 00:13:33.200 +So here's an example of Dimitri + +00:13:33.200 --> 00:13:42.520 +using his personal speech +recognition model. + +00:13:57.840 --> 00:14:04.640 +You can see the. + +00:14:04.640 --> 00:14:08.840 +So the transcription +this time is make all voice interactive + +00:14:08.840 --> 00:14:12.600 +devices be able to understand any person +speak to them. + +00:14:13.320 --> 00:14:15.760 +It's not perfect, but it's much, much + +00:14:16.520 --> 00:14:18.000 +more useful. + +00:14:18.760 --> 00:14:22.920 +Project Euphonia has started in English, +but it's now expanding to include + +00:14:22.920 --> 00:14:32.320 +Hindi, French, Spanish and Japanese. + +00:14:32.320 --> 00:14:35.840 +So that project demonstrated +how much better + +00:14:35.840 --> 00:14:39.040 +speech recognition +technology could be. + +00:14:39.360 --> 00:14:43.280 +But the original data wasn't +shareable outside Google + +00:14:43.720 --> 00:14:48.680 +and that limited the benefits +of all that data gathering effort. + +00:14:48.680 --> 00:14:53.040 +So the Speech Accessibility Project +at the University of Illinois + +00:14:53.040 --> 00:14:56.360 +is an example of +of what we might do about that problem. + +00:14:56.760 --> 00:15:01.280 +It's an initiative to make a dataset +for broader research purposes. + +00:15:02.080 --> 00:15:05.400 +It was launched in 2022, +and it's a coalition + +00:15:05.400 --> 00:15:10.640 +of technologists, academic researchers +and community organizations. + +00:15:11.240 --> 00:15:15.560 +The goal is to collect +a diverse speech dataset for training + +00:15:15.560 --> 00:15:19.760 +speech recognition models to do better +at recognizing atypical speech. + +00:15:21.400 --> 00:15:24.080 +It's building on some of the lessons +learned in Project + +00:15:24.080 --> 00:15:28.400 +Euphonia, paying attention +to Ethical data collection. + +00:15:28.920 --> 00:15:31.400 +So individuals are paid for participating. + +00:15:31.400 --> 00:15:34.720 +Their samples are de-identified +to protect privacy. + +00:15:35.280 --> 00:15:36.680 +The dataset is private. + +00:15:36.680 --> 00:15:39.800 +It is managed by UIUC + +00:15:40.320 --> 00:15:45.200 +and will be made available +for research purposes, + +00:15:45.200 --> 00:15:49.360 +and this effort is backed +by cross-industry very broad support + +00:15:49.360 --> 00:15:52.960 +from Amazon, Apple, Google, +Meta and Microsoft. 
+
+00:15:53.720 --> 00:15:57.040
+It's going to enable
+both academic researchers
+
+00:15:57.040 --> 00:15:59.160
+and industry partners to make progress.
+
+00:16:00.920 --> 00:16:02.480
+Although the current work is focused on
+
+00:16:02.480 --> 00:16:07.720
+speech data, this is in general
+a model that could be used for other data
+
+00:16:07.960 --> 00:16:10.560
+that's needed to make models
+more inclusive
+
+00:16:11.720 --> 00:16:14.000
+so we could think of touch data.
+
+00:16:14.720 --> 00:16:18.600
+And there are already significant efforts
+going on together.
+
+00:16:18.600 --> 00:16:26.040
+Sign language video data
+for for sign language translation.
+
+00:16:26.040 --> 00:16:29.680
+Then Project Relate
+is an example of the kind of app
+
+00:16:29.840 --> 00:16:31.960
+that can be developed
+with this kind of data.
+
+00:16:32.520 --> 00:16:36.800
+It's an Android app
+that provides individuals
+
+00:16:36.800 --> 00:16:40.440
+with the ability to build their own
+personalized speech models
+
+00:16:40.880 --> 00:16:44.720
+and then use them for text to speech,
+for communication,
+
+00:16:45.000 --> 00:16:51.200
+for communicating with home assistants.
+
+00:16:51.200 --> 00:16:54.720
+Personalized speech
+models look really promising,
+
+00:16:55.720 --> 00:16:58.880
+and potentially a similar approach
+could be taken into build
+
+00:16:58.880 --> 00:17:02.760
+personalized models
+for other things like gesture recognition,
+
+00:17:02.800 --> 00:17:07.040
+touch screen interactions,
+interpreting inaccurate typing.
+
+00:17:07.160 --> 00:17:08.320
+I think there's
+
+00:17:08.320 --> 00:17:17.800
+there's a world of opportunity there
+that we haven't really begun to explore.
+
+00:17:17.800 --> 00:17:21.800
+So now that we know we can build
+effective personal models
+
+00:17:22.160 --> 00:17:26.120
+from just a few hundred utterances,
+can we learn from this?
+
+00:17:26.400 --> 00:17:31.520
+How to build more inclusive general models
+would be a really important goal.
+
+00:17:32.240 --> 00:17:34.720
+Can we improve the
+performance even further
+
+00:17:34.720 --> 00:17:38.480
+by drawing on a person's
+frequently used vocabulary?
+
+00:17:39.080 --> 00:17:41.800
+Can we prime models with vocabulary
+
+00:17:41.800 --> 00:17:45.400
+from the current context?
+
+00:17:45.400 --> 00:17:50.680
+And as Shivam Singh mentioned yesterday,
+we're beginning to be able to combine
+
+00:17:50.680 --> 00:17:53.760
+text, image and audio sources to provide
+
+00:17:53.760 --> 00:17:56.480
+a richer context for AI to use.
+
+00:17:57.080 --> 00:17:59.640
+So there's very fast progress happening
+
+00:17:59.640 --> 00:18:03.320
+in all of these areas.
+
+00:18:03.320 --> 00:18:08.000
+Just as another example, the best student
+paper at the Assets 2022 conference
+
+00:18:08.360 --> 00:18:13.480
+was using vocabularies that were generated
+automatically from from photographs
+
+00:18:13.720 --> 00:18:18.120
+to prime the word prediction
+component of a communication system,
+
+00:18:18.520 --> 00:18:21.320
+for a more efficient conversation around those
+
+00:18:21.320 --> 00:18:24.920
+photographs.
+
+00:18:24.920 --> 00:18:25.480
+Finally,
+
+00:18:26.480 --> 00:18:28.400
+bring your own model.
+
+00:18:28.400 --> 00:18:34.080
+I really agree with Shaomei Wu
+when she said yesterday
+
+00:18:34.400 --> 00:18:39.720
+that use cases of media
+creation are under investigated,
+
+00:18:40.280 --> 00:18:43.880
+we can apply personalized models
+in content creation.
+ +00:18:44.400 --> 00:18:47.360 +Think about plugging +in your personal speech model + +00:18:47.680 --> 00:18:52.720 +to contribute captions for your +livestreamed audio for this meeting. + +00:18:52.720 --> 00:18:56.720 +The potential is huge +and web standards might need to evolve + +00:18:56.720 --> 00:19:03.720 +to support +some of these kinds of use cases. + +00:19:03.720 --> 00:19:04.320 +Okay. + +00:19:05.000 --> 00:19:08.840 +Next part. +When we when we talk about assistive AI, + +00:19:09.120 --> 00:19:11.640 +we're often talking about technologies +that are + +00:19:11.880 --> 00:19:15.920 +that are being applied at the point +of consumption, helping an individual + +00:19:15.920 --> 00:19:20.960 +to overcome accessibility barriers +in digital content or in the world. + +00:19:20.960 --> 00:19:26.640 +And I want to focus this section on +AI at source and why that is so important. + +00:19:27.520 --> 00:19:30.920 +Powerful AI tools in the hands of users + +00:19:31.360 --> 00:19:34.080 +don't mean that authors +can forget about accessibility, + +00:19:35.000 --> 00:19:39.320 +and we've been talking about many examples +of this through this symposium. + +00:19:39.320 --> 00:19:45.760 +But here are a few +that appealed to me. + +00:19:45.760 --> 00:19:48.760 +So I'm showing a figure from a paper. + +00:19:49.280 --> 00:19:52.520 +The figure is captioned user response time + +00:19:52.520 --> 00:19:57.200 +by authentication condition, +and the figure itself is a box plot. + +00:19:57.200 --> 00:20:00.920 +It shows response times from an experiment +for six different + +00:20:01.040 --> 00:20:03.400 +experimental conditions. + +00:20:03.400 --> 00:20:06.040 +So it's a pretty complex figure. + +00:20:06.400 --> 00:20:11.160 +And if I'm going to publish this +in my paper, my paper is available + +00:20:11.160 --> 00:20:15.000 +and I need to provide a description +of this image. + +00:20:15.000 --> 00:20:16.520 +There's so much information here. + +00:20:18.080 --> 00:20:20.480 +When faced with this task, + +00:20:20.480 --> 00:20:24.520 +about 50% of of academic authors + +00:20:24.520 --> 00:20:28.120 +resort to simply +repeating the caption of the figure. + +00:20:28.920 --> 00:20:32.240 +And this is really no help at all +to a blind scholar. + +00:20:32.680 --> 00:20:35.040 +They can already read the caption +that's in text. + +00:20:35.800 --> 00:20:38.720 +So usually the caption is saying + +00:20:38.920 --> 00:20:42.200 +what information you'll find +in the figure, but it's not giving you + +00:20:42.200 --> 00:20:46.640 +the actual information +that's in the figure. + +00:20:46.640 --> 00:20:49.440 +Now, as we discussed in yesterday's panel, + +00:20:50.200 --> 00:20:53.400 +that blind scholar reading +my paper could use + +00:20:53.440 --> 00:20:57.800 +AI to get a description of the figure, +but AI doesn't + +00:20:57.800 --> 00:21:01.520 +really have the context +to generate a good description. + +00:21:02.080 --> 00:21:06.040 +Only the author knows +what's important to convey. + +00:21:06.040 --> 00:21:09.120 +At the same time, +most authors aren't familiar + +00:21:09.120 --> 00:21:12.120 +with the guidelines +for describing images like this, + +00:21:12.800 --> 00:21:16.080 +and writing +a description can seem like a chore. 
+
+00:21:16.080 --> 00:21:20.320
+That's why I really love the ideas
+that Amy Pavel shared yesterday
+
+00:21:20.320 --> 00:21:25.680
+for ways that a tool could help content
+creators with their own description tasks,
+
+00:21:26.840 --> 00:21:29.480
+perhaps by generating an overall structure
+
+00:21:30.000 --> 00:21:32.720
+or an initial attempt
+that a person can edit.
+
+00:21:33.360 --> 00:21:34.160
+I mean, there are
+
+00:21:34.160 --> 00:21:37.760
+there are existing guidelines
+for describing different kinds of chart.
+
+00:21:38.200 --> 00:21:43.520
+Why not teach AI
+how to identify different kinds of chart
+
+00:21:43.520 --> 00:21:51.280
+and sort of generate
+a beginning description?
+
+00:21:51.280 --> 00:21:55.800
+And Shivam Singh was talking yesterday as
+well about recent progress in this area.
+
+00:21:57.120 --> 00:22:00.200
+So ideally AI could refine
+
+00:22:00.560 --> 00:22:03.800
+its text in an interactive dialog
+with the author
+
+00:22:04.240 --> 00:22:07.640
+and then the resulting description
+would be provided in the paper
+
+00:22:07.880 --> 00:22:13.440
+and anyone could access it
+whether or not they had their own AI.
+
+00:22:13.440 --> 00:22:17.720
+So that's what I mean by applying
+AI at source where there's a person
+
+00:22:17.720 --> 00:22:20.960
+with the context to make sure
+the description is appropriate
+
+00:22:21.560 --> 00:22:23.480
+and that can provide a better description.
+
+00:22:24.800 --> 00:22:27.080
+Of course,
+it can only provide one description.
+
+00:22:27.080 --> 00:22:31.720
+There is also an important role
+for image understanding that can support
+
+00:22:31.960 --> 00:22:34.360
+personalized exploration of images
+
+00:22:35.600 --> 00:22:37.880
+so that a reader could query information
+
+00:22:37.880 --> 00:22:40.200
+that wasn't available in a short
+description.
+
+00:22:41.040 --> 00:22:43.200
+Like what were the maximum and minimum
+
+00:22:43.200 --> 00:22:46.200
+response times for the gesture condition
+in this experiment?
+
+00:22:46.880 --> 00:22:52.520
+I'm not saying that AI at source
+is the only solution, but it's important
+
+00:22:52.520 --> 00:22:58.000
+and perhaps it's an undeveloped piece.
+
+00:22:58.000 --> 00:22:59.320
+Here's the second example.
+
+00:22:59.320 --> 00:23:01.120
+I love examples
+
+00:23:01.880 --> 00:23:04.120
+as we were just talking about
+in the earlier panel.
+
+00:23:04.120 --> 00:23:08.240
+Text transformations
+can make written content more accessible.
+
+00:23:08.880 --> 00:23:12.040
+So, for example, using literal language
+
+00:23:12.040 --> 00:23:14.760
+is preferable for cognitive accessibility.
+
+00:23:15.680 --> 00:23:19.880
+So an idiom like she was in for a penny
+in for a pound
+
+00:23:20.400 --> 00:23:26.040
+can be hard to spot if you're not familiar
+with that particular idiom.
+
+00:23:26.040 --> 00:23:29.560
+It can be very confusing
+if you try to interpret it literally.
+
+00:23:30.680 --> 00:23:31.880
+Content authors might
+
+00:23:31.880 --> 00:23:34.160
+use this kind of language
+without realizing
+
+00:23:35.200 --> 00:23:40.120
+language models could transform text
+to improve accessibility in many ways.
+
+00:23:40.120 --> 00:23:45.240
+And one is by replacing idioms
+with more literal phrasing.
+ +00:23:45.240 --> 00:23:48.800 +So here I asked a language model +to rephrase the sentence + +00:23:48.800 --> 00:23:52.640 +without the idiom, +and it came up with a very sensible, + +00:23:52.760 --> 00:23:55.520 +although a little complex +little literal replacement. + +00:23:55.880 --> 00:23:59.600 +She decided to fully commit +to the situation no matter the cost. + +00:24:01.560 --> 00:24:03.320 +Again, this could be applied + +00:24:03.320 --> 00:24:06.440 +as a user tool and as a tool for authors + +00:24:06.800 --> 00:24:10.640 +to help them identify where their writing +could be misinterpreted. + +00:24:11.680 --> 00:24:14.200 +The one puts the onus on the consumer + +00:24:14.240 --> 00:24:18.680 +to bring their own solution, apply +it and be alert for potential mistakes. + +00:24:19.120 --> 00:24:21.920 +The other fixes +the potential access problems + +00:24:22.120 --> 00:24:25.120 +at source, where the author +can verify accuracy + +00:24:27.040 --> 00:24:30.240 +and as I mentioned +earlier, because today's large language + +00:24:30.240 --> 00:24:34.520 +models are not connected to a ground truth +and they do have a tendency + +00:24:34.520 --> 00:24:35.800 +to hallucinate. + +00:24:35.800 --> 00:24:39.160 +Applying them at source +is one way to reap the benefits + +00:24:39.160 --> 00:24:43.640 +much more quickly +without risking harm to vulnerable users. + +00:24:43.640 --> 00:24:47.200 +Once we collect language models, +connect them to facts + +00:24:48.280 --> 00:24:50.560 +or connect +speech to the domain of discourse, + +00:24:50.920 --> 00:24:53.760 +we will really see a huge leap +in performance, + +00:24:54.280 --> 00:25:01.000 +reliability and trustworthiness. + +00:25:01.000 --> 00:25:05.840 +So in the previous two examples, +AI could be applied at source. + +00:25:06.440 --> 00:25:10.040 +What about when the +AI has to be on the consumer side, + +00:25:10.200 --> 00:25:13.800 +like when using text to speech, +to read out text on the web. + +00:25:15.280 --> 00:25:15.840 +On the screen + +00:25:15.840 --> 00:25:20.120 +here is the start of the Google +information sidebar about Edinburgh. + +00:25:20.480 --> 00:25:25.720 +The capital city of Scotland. Is a heading, +subheading in the main text paragraph. + +00:25:27.240 --> 00:25:28.760 +Text to speech is making + +00:25:28.760 --> 00:25:34.600 +huge advances with more and more natural +sounding voices becoming available + +00:25:34.600 --> 00:25:38.160 +and the capability +of more expressive speech, + +00:25:38.480 --> 00:25:41.560 +which itself makes comprehension +more easy. + +00:25:42.840 --> 00:25:45.080 +And expressiveness can include things + +00:25:45.080 --> 00:25:47.840 +like adjusting the volume, the prosody. + +00:25:49.120 --> 00:25:52.720 +When reading a heading, maybe +I would naturally read it a little louder. + +00:25:53.200 --> 00:25:55.280 +Pause afterwards. + +00:25:55.960 --> 00:26:00.760 +For a TTS service to do +the best job reading out text on the web. + +00:26:01.240 --> 00:26:07.520 +It helps to have the semantics +explicitly expressed. + +00:26:07.520 --> 00:26:12.240 +So for example, the use of heading mark up +on Edinburgh on this passage. + +00:26:13.480 --> 00:26:16.280 +It's also important +that domain specific terms + +00:26:16.280 --> 00:26:19.920 +and people's names or place +names are pronounced correctly. + +00:26:20.600 --> 00:26:23.920 +So many people +not from the UK on first sight, + +00:26:23.920 --> 00:26:26.960 +they pronounce Edinburgh. 
+ +00:26:27.760 --> 00:26:28.520 +Web standards, + +00:26:28.520 --> 00:26:32.520 +if they're applied properly, +can mark up the semantics like headings + +00:26:32.520 --> 00:26:37.680 +and pronunciation of specialized +or unusual words, helping the downstream AI + +00:26:37.760 --> 00:26:41.320 +to perform better. In fact, + +00:26:42.520 --> 00:26:42.880 +AI could + +00:26:42.880 --> 00:26:47.600 +also be used to identify the intended +structure and compare against the markup, + +00:26:48.120 --> 00:26:51.440 +or identify unusual words or acronyms +where + +00:26:51.440 --> 00:26:53.840 +pronunciation information +could be helpful, + +00:26:54.760 --> 00:26:59.120 +and then the passage can be read +appropriately by your preferred text + +00:26:59.120 --> 00:27:02.160 +to speech Voice at your preferred +speed and pitch. + +00:27:04.640 --> 00:27:07.480 +Can also be used by a speech + +00:27:07.480 --> 00:27:11.320 +to text model +to marry the vocabulary on the page + +00:27:11.320 --> 00:27:14.120 +with what you're saying +as you're interacting with the page + +00:27:16.120 --> 00:27:18.680 +to use voice controls. + +00:27:18.680 --> 00:27:23.400 +So I'm showing this example to illustrate +the web accessibility standards + +00:27:23.600 --> 00:27:28.840 +work together with assistive AI techniques +to enable the best outcome. And + +00:27:29.480 --> 00:27:34.120 +many uses of assistive technology +can benefit from this information. + +00:27:34.120 --> 00:27:37.480 +So thinking about applying AI at source, + +00:27:37.960 --> 00:27:40.840 +there's an important role here for A.I. + +00:27:40.840 --> 00:27:44.200 +that makes sure that the visual +and structural DOM + +00:27:44.280 --> 00:27:52.360 +representations are aligned. + +00:27:52.360 --> 00:27:55.320 +So I just want to reiterate the + +00:27:56.760 --> 00:28:00.200 +the powerful benefits of applying AI +at authoring time + +00:28:00.440 --> 00:28:04.200 +that these examples illustrate. + +00:28:04.200 --> 00:28:07.520 +So for software, +removing the burden from people + +00:28:07.520 --> 00:28:11.080 +with disabilities +to supply their own tools to bridge gaps. + +00:28:12.040 --> 00:28:14.400 +Secondly, it benefits more people, + +00:28:14.400 --> 00:28:17.880 +including those people +who don't have access to the AI tools, + +00:28:18.280 --> 00:28:21.960 +people with low end devices, +poor internet connectivity, + +00:28:22.160 --> 00:28:25.760 +less technology literacy. + +00:28:25.760 --> 00:28:31.080 +Thirdly, content creator can verify +the accuracy and safety of suggestions, + +00:28:31.320 --> 00:28:35.400 +mitigating harms from bias or errors +because they have the context + +00:28:36.560 --> 00:28:41.280 +and AI can also potentially mitigate harms +in other ways. + +00:28:41.280 --> 00:28:47.640 +For example, flagging videos, images +or animations that might trigger adverse + +00:28:47.640 --> 00:28:50.720 +health consequences +for some people like flashing lights. + +00:28:52.400 --> 00:28:55.440 +So AI inside is likely to reach more people + +00:28:56.080 --> 00:28:58.440 +than AI provided by end users. + +00:28:58.440 --> 00:29:01.720 +I think this is how +we'll get the most benefit for the least + +00:29:01.720 --> 00:29:06.080 +harm. 
+ +00:29:06.080 --> 00:29:11.360 +It's also a huge opportunity +to make accessibility easier to achieve + +00:29:12.280 --> 00:29:16.760 +AI can make it much quicker and easier +to generate the accessibility information + +00:29:17.000 --> 00:29:19.880 +like captions or image descriptions +as we've discussed, + +00:29:20.680 --> 00:29:23.640 +and lowering the barrier to entry with +assistive + +00:29:24.440 --> 00:29:29.840 +tools is one way to encourage +good accessibility practice. + +00:29:29.840 --> 00:29:33.680 +AI can proactively identify +where accessibility work is needed + +00:29:35.680 --> 00:29:37.720 +and evaluate designs + +00:29:37.720 --> 00:29:40.000 +before +even a line of code has been written. + +00:29:41.720 --> 00:29:43.240 +But perhaps + +00:29:43.240 --> 00:29:47.480 +the biggest opportunity and +the greatest need for our attention + +00:29:47.880 --> 00:29:50.920 +is the use of AI to generate code. + +00:29:51.440 --> 00:29:54.000 +Which brings us to the final section + +00:29:54.800 --> 00:29:56.720 +of this talk. + +00:29:57.120 --> 00:29:59.480 +So in previous section + +00:29:59.480 --> 00:30:02.200 +we talked about ways that I can be applied + +00:30:02.480 --> 00:30:05.800 +in content creation +to help build accessibility in, + +00:30:06.800 --> 00:30:11.120 +but AI itself is also impacting +the way websites are designed + +00:30:11.120 --> 00:30:14.760 +and developed +independent of accessibility. + +00:30:14.760 --> 00:30:19.640 +So in this section, let's +think about how this change will impact + +00:30:19.640 --> 00:30:23.320 +our ability to bake accessibility in +and can we use + +00:30:23.480 --> 00:30:25.320 +AI to help us. + +00:30:28.440 --> 00:30:30.800 +As accessibility advocates + +00:30:30.800 --> 00:30:36.560 +we have long been pushing +the need to shift left, and by that + +00:30:36.560 --> 00:30:41.680 +we mean paying attention to accessibility +right from the start of a project. + +00:30:42.080 --> 00:30:44.080 +When you're understanding +the market potential, + +00:30:44.080 --> 00:30:47.880 +when you're gathering requirement, +when you're understanding and evaluating + +00:30:47.880 --> 00:30:53.000 +risks, developing designs and developing +the code that implements those designs. + +00:30:55.600 --> 00:30:56.440 +In a reactive + +00:30:56.440 --> 00:30:59.720 +approach to accessibility, +which is too often what happens, + +00:31:00.400 --> 00:31:04.960 +the first attention to accessibility comes +when automated tools + +00:31:04.960 --> 00:31:07.760 +are run on an already implemented system. + +00:31:09.520 --> 00:31:11.680 +Even then, such tools + +00:31:11.680 --> 00:31:14.480 +don't find all issues and + +00:31:15.840 --> 00:31:17.920 +may not even find +the most significant ones + +00:31:18.440 --> 00:31:21.280 +which can lead teams to prioritize poorly. + +00:31:22.240 --> 00:31:25.880 +So with that, our reactive approach + +00:31:26.480 --> 00:31:30.920 +teams, can be kind of +overwhelmed with hundreds + +00:31:30.920 --> 00:31:35.360 +or even thousands of issues +kind of late in their process and + +00:31:36.760 --> 00:31:38.360 +have difficulty tackling it. + +00:31:38.360 --> 00:31:44.240 +It makes accessibility +seem much harder than than it could be. + +00:31:44.240 --> 00:31:47.280 +So this morning's panel, +we discussed ways that AI can be used + +00:31:47.600 --> 00:31:50.560 +in testing to help +find accessibility problems. 
+ +00:31:52.080 --> 00:31:54.800 +AI is also already being used earlier + +00:31:54.800 --> 00:31:57.600 +in the process by designers and developers. + +00:31:58.840 --> 00:32:00.680 +In development, for example, + +00:32:01.880 --> 00:32:04.040 +GitHub Copilot is + +00:32:04.040 --> 00:32:07.200 +AI model that makes code completion +predictions + +00:32:07.880 --> 00:32:12.200 +and GitHub claims that in files +where it's turned on nearly + +00:32:12.200 --> 00:32:18.920 +40% of code is being written by GitHub +copilot in popular + +00:32:18.960 --> 00:32:23.520 +coding languages. + +00:32:23.520 --> 00:32:27.640 +There's also systems +that generate code from design wireframes + +00:32:27.880 --> 00:32:32.120 +or from high resolution mockups, +or even from text prompts. + +00:32:32.120 --> 00:32:34.920 +So it's incumbent on us to ask + +00:32:35.240 --> 00:32:37.560 +what data are those systems trained on + +00:32:38.800 --> 00:32:41.480 +in the case of copilot +is trained on GitHub + +00:32:41.480 --> 00:32:45.360 +open source project code. + +00:32:45.360 --> 00:32:48.920 +So what's the probability +that this existing code is accessible? + +00:32:49.880 --> 00:32:54.800 +We know that we still have a lot of work +to do to make digital accessibility + +00:32:54.800 --> 00:32:58.320 +the norm on the web. +Today is the exception, + +00:32:59.680 --> 00:33:03.040 +and many of you +probably know WebAIM does + +00:33:03.040 --> 00:33:07.280 +an annual survey +of the top million website home pages. + +00:33:07.800 --> 00:33:11.920 +It runs an automated tool +and puts the issues that it found + +00:33:12.800 --> 00:33:15.200 +almost 97% of + +00:33:15.600 --> 00:33:19.480 +the million pages +had accessibility issues, + +00:33:20.000 --> 00:33:22.520 +and that's only the automatically +detectable ones. + +00:33:23.960 --> 00:33:26.840 +They found +an average of 50 issues per page. + +00:33:26.840 --> 00:33:30.440 +And they also found the page +complexity is growing + +00:33:30.440 --> 00:33:33.480 +significantly. + +00:33:33.480 --> 00:33:37.160 +Over 80% of the pages +they looked at had low contrast + +00:33:37.400 --> 00:33:40.720 +text issues. + +00:33:40.720 --> 00:33:44.640 +More than half had alternative text +missing for images, + +00:33:45.440 --> 00:33:47.560 +almost half had missing form labels. + +00:33:48.160 --> 00:33:51.160 +So even though these are issues, +they're easy to find with + +00:33:51.160 --> 00:33:54.800 +the automated tools that we have today, +they're still not being addressed. + +00:33:55.240 --> 00:33:59.840 +These are very basic accessibility issues +and they're everywhere. + +00:33:59.840 --> 00:34:03.440 +So we know what this will surely mean +for AI models + +00:34:03.600 --> 00:34:07.080 +learning from today's web. + +00:34:07.120 --> 00:34:10.840 +And here's an example +of how this might be playing out already. + +00:34:11.520 --> 00:34:14.840 +So code + +00:34:14.840 --> 00:34:18.560 +snippets are one of the most common +things that developers search for, + +00:34:19.280 --> 00:34:23.720 +and a large language +model can come up with pretty decent code snippets. + +00:34:23.720 --> 00:34:27.760 +And this is this is a game changer +for developers and it's already happening. + +00:34:28.240 --> 00:34:31.480 +So let's say a +developer is new to Flutter. + +00:34:31.920 --> 00:34:36.120 +Flutter is Google's open +source mobile app development platform. + +00:34:36.960 --> 00:34:40.920 +They want to create a button labeled +with an icon known as an icon button. 
+ +00:34:41.880 --> 00:34:43.280 +So on this slide + +00:34:43.280 --> 00:34:49.560 +is the code that ChatGPT produces +when it's asked for a Flutter code + +00:34:49.560 --> 00:34:51.760 +for an icon button + +00:34:51.920 --> 00:34:56.480 +along with the code snippet, +it also provided some explanation + +00:34:56.480 --> 00:34:58.720 +and it even links +to the documentation page. + +00:34:58.720 --> 00:35:00.800 +So it's pretty useful. + +00:35:01.240 --> 00:35:03.080 +And the code it gave for an icon + +00:35:03.080 --> 00:35:08.120 +button includes a reference to what icons +to use and a function to execute + +00:35:08.520 --> 00:35:13.640 +when the button is pressed. + +00:35:13.640 --> 00:35:18.160 +There's really just one +important difference between this example + +00:35:18.160 --> 00:35:22.280 +generated by ChatGPT and the example given + +00:35:22.280 --> 00:35:26.480 +in the Flutter documentation. + +00:35:26.480 --> 00:35:29.320 +ChatGPT didn't include a tooltip, + +00:35:30.000 --> 00:35:33.560 +which means there's +no text label associated with this button. + +00:35:34.240 --> 00:35:37.880 +That's an accessibility problem. + +00:35:37.880 --> 00:35:39.000 +Let's give it credit. + +00:35:39.000 --> 00:35:41.520 +ChatGPT did mention that it's possible +to add a tool, + +00:35:42.280 --> 00:35:45.040 +but developers +look first at the code example. + +00:35:45.280 --> 00:35:49.880 +If it's not in the example, +it's easily missed. + +00:35:49.880 --> 00:35:54.080 +But in the training data here, +it seems the tooltip was not present + +00:35:54.320 --> 00:35:59.120 +enough of the time for it to surface +as an essential component of an icon button. + +00:36:02.120 --> 00:36:02.800 +So, you + +00:36:02.800 --> 00:36:05.840 +know, there's lots of example code +available online, + +00:36:05.840 --> 00:36:09.960 +but how much of that code demonstrates +accessible coding practices? + +00:36:10.560 --> 00:36:13.640 +Given the state of web accessibility, +it's likely + +00:36:13.640 --> 00:36:17.080 +the answer is not much. + +00:36:17.080 --> 00:36:22.200 +So our AI models are not going to learn +to generate accessible code. + +00:36:23.200 --> 00:36:27.080 +It's really it's just like the societal bias + +00:36:27.080 --> 00:36:31.400 +of the past being entrenched in training +sets of today. + +00:36:31.400 --> 00:36:36.840 +The past lack of accessibility +could be propagated into the future. + +00:36:36.840 --> 00:36:41.000 +So here we have an opportunity +and a potential risk. + +00:36:41.840 --> 00:36:44.320 +AI can help to write accessible code, + +00:36:44.840 --> 00:36:47.480 +but it needs to be trained +on accessible code + +00:36:47.720 --> 00:36:50.480 +or augmented with the tools that can + +00:36:50.480 --> 00:36:53.480 +correct accessibility issues. + +00:36:53.480 --> 00:36:55.760 +And I think it's important +to point out as well + +00:36:56.440 --> 00:37:00.200 +that I deliberately use +an example in a framework + +00:37:00.520 --> 00:37:06.640 +rather than HTML example, because that's +what developers are writing in these days. + +00:37:07.120 --> 00:37:10.000 +They're not writing raw HTML, + +00:37:10.440 --> 00:37:13.720 +their writing of frameworks, +and there are many, many different + +00:37:13.880 --> 00:37:18.520 +frameworks, each with +their own levels of accessibility and + +00:37:21.680 --> 00:37:22.400 +ways to + +00:37:22.400 --> 00:37:26.720 +incorporate accessibility. 
+ +00:37:26.720 --> 00:37:28.840 +So one thing + +00:37:30.080 --> 00:37:34.560 +is that the theme of this morning +about data being really essential + +00:37:35.600 --> 00:37:37.680 +comes up here again. + +00:37:37.680 --> 00:37:39.960 +Do we have training data + +00:37:40.320 --> 00:37:44.800 +to train a code prediction model? + +00:37:44.800 --> 00:37:48.600 +Perhaps with transfer learning +to generate more accessible code? + +00:37:49.880 --> 00:37:52.840 +Do we have test sets even that we can test + +00:37:54.440 --> 00:37:55.880 +code generation + +00:37:55.880 --> 00:37:58.880 +for its ability +to produce accessible code? + +00:38:00.080 --> 00:38:04.160 +So when we're developing datasets + +00:38:04.480 --> 00:38:07.480 +for either training or testing, + +00:38:07.880 --> 00:38:09.960 +we have to think in terms of the diversity + +00:38:09.960 --> 00:38:15.000 +of of frameworks and methods +that developers are actually working with, + +00:38:15.000 --> 00:38:18.440 +if we want to catch those issues +at the point + +00:38:18.760 --> 00:38:20.800 +of creation. + +00:38:26.240 --> 00:38:28.320 +Again, where, where, + +00:38:28.320 --> 00:38:31.560 +AI is generating code +for a whole user interface + +00:38:31.880 --> 00:38:37.520 +based on a visual design, +we need to be thinking about what + +00:38:37.520 --> 00:38:43.320 +semantics should that design tool capture +to support the generation of code + +00:38:43.600 --> 00:38:46.800 +with the right structure, +the right roles for each area, + +00:38:47.120 --> 00:38:51.280 +the basic fundamentals +of accessibility. So + +00:38:52.880 --> 00:38:55.080 +a final call to action for + +00:38:55.080 --> 00:39:00.200 +the community here is to think about what +what do we need to do here, whether it is + +00:39:00.480 --> 00:39:06.080 +advocacy, awareness raising, research, +data gathering, standards + +00:39:06.440 --> 00:39:09.840 +or refining models +to write accessible code. + +00:39:10.760 --> 00:39:12.920 +This technology is so really young. + +00:39:12.920 --> 00:39:15.160 +It has a lot of room for improvement. + +00:39:15.160 --> 00:39:19.880 +This is a perfect time for us to define +how accessibility + +00:39:19.880 --> 00:39:25.160 +should be built in +and to experiment with different ways. + +00:39:25.160 --> 00:39:30.080 +And, you know, in my opinion, +this perhaps more than anything, is + +00:39:30.080 --> 00:39:34.360 +the trend that we need to get in front +of as an accessibility + +00:39:35.480 --> 00:39:36.760 +community, +before the poor + +00:39:36.760 --> 00:39:40.920 +practices of the past are entrenched +in the automated code + +00:39:40.920 --> 00:39:43.960 +generators of the future. + +00:39:43.960 --> 00:39:47.360 +AI is already shifting left, +so we must make sure + +00:39:47.360 --> 00:39:53.360 +accessibility goes with it. + +00:39:53.360 --> 00:40:00.080 +So to summarize, we can broaden access +to assistive AI through personalization. + +00:40:01.160 --> 00:40:01.600 +To get the + +00:40:01.600 --> 00:40:05.480 +benefits of AI based empowerment +to all users, + +00:40:05.480 --> 00:40:09.680 +we should make sure that AI integration +with authoring tools + +00:40:09.680 --> 00:40:15.280 +and processes is applied where it can +to make it easier to meet accessibility + +00:40:15.280 --> 00:40:17.920 +standards and improve the overall standard. + +00:40:18.600 --> 00:40:21.320 +Born accessible is still our goal + +00:40:22.520 --> 00:40:26.840 +and AI can help us get there +if we steer it right. 
As a community + +00:40:26.840 --> 00:40:30.560 +we have, we have a lot of work to do, +but I'm really excited + +00:40:30.640 --> 00:40:35.840 +about the potential here. + +00:40:35.840 --> 00:40:38.200 +So thank you all for listening. + +00:40:38.200 --> 00:40:41.960 +Thanks to my Google colleagues and the IBM + +00:40:41.960 --> 00:40:44.200 +accessibility team for + +00:40:45.920 --> 00:40:48.840 +feedback and ideas +and great conversations. + +00:40:49.440 --> 00:40:54.200 +And now I want to invite Jutta to to join + +00:40:54.960 --> 00:41:00.800 +and let's let's have the conversation. + +00:41:00.800 --> 00:41:02.600 +Thank you, Shari. + +00:41:02.600 --> 00:41:05.920 +And I really, really appreciate +your coverage of authoring + +00:41:05.920 --> 00:41:09.440 +and the prevention of barriers +and the emphasis on timely, + +00:41:09.440 --> 00:41:12.800 +proactive measures. + +00:41:12.800 --> 00:41:17.240 +There may be an opportunity +actually to relook at authoring + +00:41:17.560 --> 00:41:21.800 +environments, etc., within W3C. + +00:41:21.800 --> 00:41:26.400 +Yeah, actually, just just to respond +to that really quickly, I do wonder, like, + +00:41:26.480 --> 00:41:31.800 +should we be focusing +on evaluating frameworks + +00:41:31.800 --> 00:41:34.760 +more than evaluating individual pages? + +00:41:35.080 --> 00:41:37.800 +You know, +I would we get more bang for our buck + +00:41:38.120 --> 00:41:40.840 +if that was where we paid attention? + +00:41:40.840 --> 00:41:42.080 +Yes, exactly. + +00:41:42.080 --> 00:41:44.880 +The opportunity to add, and especially as + +00:41:46.160 --> 00:41:47.400 +these tools are + +00:41:47.400 --> 00:41:50.960 +now also assisting authors, +which was part of what + +00:41:51.480 --> 00:41:53.920 +the authoring of the authoring + +00:41:54.480 --> 00:41:57.000 +standards were looking at, prompting + +00:41:57.920 --> 00:42:03.080 +providing the necessary supports +and making it possible for individuals + +00:42:03.080 --> 00:42:07.080 +with disabilities +to also become authors of code + +00:42:07.080 --> 00:42:11.720 +and to produce code so the greater +participation of the community, + +00:42:12.480 --> 00:42:16.040 +I think, will create that +some of that culture shift. + +00:42:17.040 --> 00:42:21.200 +So thank you very much for covering this. + +00:42:21.200 --> 00:42:24.560 +So in terms of the questions +that we were going to talk about, + +00:42:24.840 --> 00:42:29.040 +you had suggested that we might start +with one of the thorny questions + +00:42:29.040 --> 00:42:33.320 +that was asked yesterday +that we didn't get time to respond to. + +00:42:34.520 --> 00:42:38.880 +So the the question was, +do you think that AI + +00:42:38.960 --> 00:42:44.240 +and big companies such as Google +and Meta driving research in AI + +00:42:44.240 --> 00:42:49.240 +can be problematic +with respect to social societal issues + +00:42:49.520 --> 00:42:52.480 +which don't necessarily garner +the highest revenue? + +00:42:52.920 --> 00:42:55.640 +And if so, +how do you think we can approach this? + +00:42:56.800 --> 00:42:57.960 +Yeah, thank you Jutta + +00:42:57.960 --> 00:43:01.440 +and thank you to the person +who asked that question too. + +00:43:03.000 --> 00:43:05.760 +It's true that company goals and society + +00:43:05.760 --> 00:43:09.440 +can pull in different directions. 
+ +00:43:09.440 --> 00:43:12.480 +I do think there are benefits to having + +00:43:12.480 --> 00:43:16.920 +big companies working on these core models +because they often + +00:43:16.920 --> 00:43:21.880 +have better access to very large datasets +that can + +00:43:23.000 --> 00:43:25.040 +bring breakthroughs that then + +00:43:25.040 --> 00:43:27.440 +others can share, then that can help + +00:43:28.880 --> 00:43:31.320 +raise the tide +to raise all boats in a way. + +00:43:31.880 --> 00:43:34.840 +But advocacy and policy definitely have + +00:43:35.240 --> 00:43:39.640 +an important role to play in +guiding the application of AI, + +00:43:39.640 --> 00:43:45.560 +in the direction of AI research, +the ways that it's applied. + +00:43:45.560 --> 00:43:50.680 +Also, I wanted to say one approach here +could be through initiatives + +00:43:50.680 --> 00:43:54.320 +like the Speech Accessibility Project +that I talked about. + +00:43:54.680 --> 00:43:57.320 +So that's an example of of big tech +working + +00:43:57.320 --> 00:44:00.800 +together with advocacy groups and academia + +00:44:01.160 --> 00:44:05.560 +to create data that can be applied +to many different research projects. + +00:44:05.560 --> 00:44:08.280 +And that's a model +that we could try to replicate. + +00:44:08.280 --> 00:44:13.360 +Do you think that that I mean, you've +talked quite a bit about the opportunity + +00:44:13.360 --> 00:44:17.880 +for personalization and of course, +one of the biggest issues here is that + +00:44:18.240 --> 00:44:23.560 +large companies are looking +for the largest population, + +00:44:23.560 --> 00:44:26.920 +the largest profit, +which means the largest customer base, + +00:44:26.920 --> 00:44:30.680 +which tends to push them towards +thinking about + +00:44:30.720 --> 00:44:35.120 +and not thinking about minorities, +diversity, etc.. + +00:44:35.120 --> 00:44:38.720 +But the training models +and the personalization + +00:44:40.240 --> 00:44:42.280 +strategies that you've talked about + +00:44:42.320 --> 00:44:47.000 +are things that are emerging possibilities +within large learning models. + +00:44:47.320 --> 00:44:51.080 +We have the opportunity to take +what has already been done + +00:44:51.080 --> 00:44:53.720 +generally and apply + +00:44:54.080 --> 00:44:57.680 +more personalized, +smaller data sets, etc.. + +00:44:58.040 --> 00:45:01.040 +Do you think there's a there's a role + +00:45:01.040 --> 00:45:04.960 +for the large companies to prepare the the + +00:45:05.440 --> 00:45:08.960 +the ground and then for + +00:45:10.240 --> 00:45:12.360 +the remaining issues to + +00:45:13.480 --> 00:45:17.000 +piggyback on that +with with the new training sets? + +00:45:17.480 --> 00:45:20.600 +Or do you think even there +we're going to have + +00:45:22.040 --> 00:45:22.920 +both + +00:45:22.920 --> 00:45:25.560 +cost and availability issues? + +00:45:27.280 --> 00:45:29.920 +Well, you know, +I mean, I think that the model + +00:45:29.920 --> 00:45:33.640 +that you described is +is is already happening in places + +00:45:33.960 --> 00:45:37.160 +like with the +the speech accessibility project. + +00:45:37.800 --> 00:45:41.360 +The ultimate goal would be + +00:45:41.360 --> 00:45:45.080 +to have one model that can handle + +00:45:46.640 --> 00:45:49.280 +more diverse datasets + +00:45:50.360 --> 00:45:51.680 +and it takes + +00:45:52.760 --> 00:45:55.240 +a concerted effort to gather that data. 
+ +00:45:56.920 --> 00:45:59.480 +But if a community gathered the data + +00:46:00.560 --> 00:46:03.720 +and it was possible +to contribute that data, then, + +00:46:05.200 --> 00:46:08.120 +you know, that's that's another direction +that we can + +00:46:09.200 --> 00:46:13.400 +influence the the larger models +that are trained on large data. + +00:46:14.000 --> 00:46:16.920 +But personalization is + +00:46:18.320 --> 00:46:21.120 +I think it's going to be very important + +00:46:21.120 --> 00:46:25.200 +for for tackling some of that tail end. So + +00:46:26.480 --> 00:46:30.720 +personalization +is not just an accessibility benefit. + +00:46:31.080 --> 00:46:33.800 +There's there's lots of + +00:46:33.880 --> 00:46:36.640 +there's lots of tail populations, small + +00:46:36.640 --> 00:46:40.880 +end populations that add up to a large end +and a lot of people. + +00:46:40.880 --> 00:46:45.200 +So the more the +I think that the big companies benefit + +00:46:45.440 --> 00:46:49.880 +greatly +by exploring these smaller populations + +00:46:49.880 --> 00:46:55.400 +and learning how to adapt models +to different populations. + +00:46:55.400 --> 00:46:56.520 +And then + +00:46:57.280 --> 00:47:02.000 +as I mentioned, the ultimate goal +would be to learn how to fold that back + +00:47:02.000 --> 00:47:06.120 +in to a larger model +without it being lost in the process. + +00:47:06.200 --> 00:47:10.920 +Yeah, we have the dilemma that the further +you are from the larger model, + +00:47:10.920 --> 00:47:16.400 +the more you need to work +to shift it in your direction. + +00:47:16.920 --> 00:47:17.840 +So the + +00:47:19.440 --> 00:47:20.320 +that is + +00:47:20.320 --> 00:47:23.480 +something +I think that will need to be addressed. + +00:47:23.480 --> 00:47:28.080 +Whatever personalization happens, +the people that need the personalization + +00:47:28.080 --> 00:47:32.200 +the most will have the greatest difficulty +with the personalization. + +00:47:32.200 --> 00:47:36.200 +Do you think there's any strategies +that that might be available + +00:47:36.440 --> 00:47:40.160 +for us to use to address +that particular dilemma? + +00:47:41.040 --> 00:47:43.800 +Yeah You're you're touching my heart +with that question + +00:47:43.800 --> 00:47:46.640 +because I, I really + +00:47:47.680 --> 00:47:51.560 +that's been an ongoing problem +in accessibility for forever + +00:47:52.040 --> 00:47:55.840 +the the and not just in the context of AI, + +00:47:56.000 --> 00:47:59.040 +but the people who would benefit the most + +00:47:59.040 --> 00:48:04.920 +from personalization may be in a position +that makes it hard to discover + +00:48:04.920 --> 00:48:09.080 +and activate even personalization +that's already available. + +00:48:09.920 --> 00:48:12.200 +So one approach that works, I think in + +00:48:12.200 --> 00:48:15.840 +some context is dynamic adaptation, + +00:48:16.160 --> 00:48:21.440 +where instead of a person +needing to adapt to a system, + +00:48:21.440 --> 00:48:25.320 +the system can kind of flexibly adapt +to the person that's using it. + +00:48:26.040 --> 00:48:28.960 +And I think that works in situations where + +00:48:30.480 --> 00:48:32.560 +the person doesn't need to behave +any different + +00:48:32.600 --> 00:48:35.120 +to take advantage of that adaptation. 
+ +00:48:36.800 --> 00:48:38.320 +It doesn't work so well where + +00:48:38.320 --> 00:48:40.600 +there's maybe a specific + +00:48:41.440 --> 00:48:44.360 +input methods that you might want to use + +00:48:44.360 --> 00:48:47.520 +that would be beneficial +where you need to do something different. + +00:48:47.840 --> 00:48:52.240 +So for, you know, for language models, +maybe we can imagine + +00:48:53.200 --> 00:48:55.520 +an Uber language model that + +00:48:56.560 --> 00:49:01.400 +first recognizes, Oh, this person's +speech is closest to this sub model + +00:49:01.400 --> 00:49:06.200 +that I have learned, and I'm +going to use that model for this person. + +00:49:06.200 --> 00:49:10.120 +And you could think of that +in terms of decreasing the + +00:49:10.120 --> 00:49:10.960 +distance, yeah. + +00:49:11.440 --> 00:49:12.920 +Yeah, yeah. + +00:49:13.920 --> 00:49:15.480 +So that's one that's one idea. + +00:49:15.480 --> 00:49:17.800 +What what do you think. + +00:49:17.800 --> 00:49:19.320 +Yeah. + +00:49:19.320 --> 00:49:22.320 +I'm wondering +whether there is an opportunity + +00:49:22.600 --> 00:49:26.120 +or if there ever will be taken +an opportunity + +00:49:26.560 --> 00:49:30.960 +to rethink just how we, we design + +00:49:31.280 --> 00:49:35.720 +what design decision we make +and how we develop + +00:49:35.720 --> 00:49:39.160 +and brings the systems to market + +00:49:39.560 --> 00:49:42.680 +such that +there is the opportunity for greater + +00:49:43.880 --> 00:49:45.640 +democratization or + +00:49:45.640 --> 00:49:48.320 +access to the tools and that + +00:49:50.000 --> 00:49:55.160 +don't begin with the the notion of let's + +00:49:56.480 --> 00:49:58.880 +design first for the majority +and then think about + +00:49:59.160 --> 00:50:01.760 +I mean, this is is an inflection point. + +00:50:01.760 --> 00:50:06.160 +There is an opportunity for small data +sets, zero shot training, + +00:50:06.160 --> 00:50:10.080 +etc., transfer, transformation transfer. + +00:50:10.080 --> 00:50:14.800 +Is this a time +when we can have a strategic push to say, + +00:50:15.440 --> 00:50:17.840 +let's think about other ways of + +00:50:17.840 --> 00:50:22.240 +of actually developing these tools +and releasing these tools? + +00:50:23.120 --> 00:50:25.760 +Maybe that's a little too idealistic that + +00:50:26.560 --> 00:50:29.480 +I don't know what you're thinking, +is there? + +00:50:29.480 --> 00:50:32.600 +Yeah, I, I think especially + +00:50:32.720 --> 00:50:36.200 +if you're in a domain +where you've identified that there's, + +00:50:37.400 --> 00:50:40.440 +you real risk and strong risk of bias. + +00:50:41.000 --> 00:50:45.480 +It's a +it should be a part of the design process + +00:50:45.800 --> 00:50:51.560 +to include people who would be outliers, +people + +00:50:51.560 --> 00:50:55.760 +who are going to test the boundaries +of what your solution can do. + +00:50:56.560 --> 00:50:58.080 +People that are going to help you + +00:50:59.720 --> 00:51:02.240 +understand +the problems that it might introduce. + +00:51:02.640 --> 00:51:06.680 +So it it's + +00:51:07.720 --> 00:51:08.720 +it's what should happen + +00:51:08.720 --> 00:51:12.560 +I think in design in in any system + +00:51:12.560 --> 00:51:16.400 +that especially if you're thinking +in AI, you need to think about + +00:51:16.880 --> 00:51:21.280 +the risks that you might be introducing +and you can’t really think about that + +00:51:21.320 --> 00:51:25.160 +without having the right people involved. 
+ +00:51:25.160 --> 00:51:25.480 +Right... + +00:51:25.480 --> 00:51:29.000 +Whether that's by... somebody yesterday +I think mentioned + +00:51:29.000 --> 00:51:30.960 +something about + +00:51:32.000 --> 00:51:33.840 +teaching designers + +00:51:33.840 --> 00:51:37.280 +and developers more about accessibility. + +00:51:37.280 --> 00:51:40.640 +And and I think that's +a really important point too + +00:51:41.120 --> 00:51:44.800 +that building diverse +teams is really important. + +00:51:44.920 --> 00:51:48.720 +Getting more diversity into computer +science is really important. + +00:51:49.120 --> 00:51:53.880 +But teaching the people who are already +there building things is also important. + +00:51:54.440 --> 00:51:57.000 +And I don't you know, +I don't meet very many + +00:51:57.000 --> 00:52:00.160 +people who say, Oh, +I don't care about accessibility. + +00:52:00.320 --> 00:52:01.880 +It's not important. + +00:52:01.880 --> 00:52:07.120 +It's it's more that it's +it's still too difficult to do. + +00:52:07.120 --> 00:52:11.680 +And that's one place +where I think AI can really, really help. + +00:52:11.680 --> 00:52:16.600 +And some of the the tools that people have +talked about today are examples of that + +00:52:17.040 --> 00:52:21.200 +where if we can make it easy +enough and lower that barrier + +00:52:21.800 --> 00:52:25.040 +and take opportunity of these creation +points + +00:52:25.040 --> 00:52:28.760 +to teach people +as well about accessibility. + +00:52:28.760 --> 00:52:29.200 +So not + +00:52:30.480 --> 00:52:31.480 +not always + +00:52:31.480 --> 00:52:36.360 +to fix everything for them, +but to fix things with them + +00:52:36.920 --> 00:52:41.880 +so that they can learn going +forwards and grow. + +00:52:41.880 --> 00:52:44.760 +I think that's a really exciting area. + +00:52:44.760 --> 00:52:46.840 +Yes, and a great way to to support + +00:52:46.840 --> 00:52:50.840 +born accessible, +so accessible by default + +00:52:50.920 --> 00:52:54.720 +with respect to what is the tools +that are used to create it. + +00:52:55.640 --> 00:53:01.000 +I'm you you contributed some questions +that you would love to discuss + +00:53:01.440 --> 00:53:06.480 +and one of the first ones is is +AI’s role mostly considered + +00:53:06.600 --> 00:53:10.800 +as improving assistive technology +or digital accessibility in general. + +00:53:10.800 --> 00:53:15.080 +And of course, this gets to this idea +of not creating + +00:53:15.080 --> 00:53:20.280 +a segregated set of innovations +that specifically address + +00:53:20.280 --> 00:53:24.000 +people with disabilities, +but also making sure that + +00:53:25.160 --> 00:53:27.200 +the innovations that are brought + +00:53:27.200 --> 00:53:30.920 +about by addressing +the needs of people who whose needs of, + +00:53:31.280 --> 00:53:35.760 +well, who face barriers can benefit +the population at large. + +00:53:36.440 --> 00:53:39.560 +So do what what do you think? + +00:53:40.040 --> 00:53:42.600 +What is the the future direction? + +00:53:43.520 --> 00:53:45.800 +Yeah, this was a question that came from + +00:53:45.800 --> 00:53:49.280 +an attendee, I think that was put into +in the registration process. 
+ +00:53:49.640 --> 00:53:51.680 +And I, + +00:53:51.680 --> 00:53:54.760 +I do think it's really important +to view AI + +00:53:54.760 --> 00:53:58.120 +as a tool for digital +accessibility in general and + +00:53:58.200 --> 00:54:01.120 +and not to just think about the end + +00:54:01.200 --> 00:54:04.120 +user applications +although those personal AI + +00:54:04.160 --> 00:54:08.320 +technologies are really important +and they're life changing and they can do things + +00:54:08.320 --> 00:54:13.240 +that aren't achievable in any other way, +but AI + +00:54:13.240 --> 00:54:16.240 +is already +a part of the development process + +00:54:16.240 --> 00:54:19.280 +and accessibility +needs to be a part of that. + +00:54:19.280 --> 00:54:22.960 +And we have so many challenges +to solve there. + +00:54:22.960 --> 00:54:23.800 +I think it's + +00:54:25.640 --> 00:54:28.440 +an area +that we need to pay more attention to. + +00:54:28.640 --> 00:54:33.040 +So not just applying AI +to detect accessibility problems, + +00:54:33.040 --> 00:54:37.160 +but engaging with those mainstream +development tools + +00:54:37.160 --> 00:54:39.600 +to make sure the accessibility +is considered. + +00:54:41.560 --> 00:54:43.680 +One sort of associated + +00:54:43.760 --> 00:54:46.640 +piece to this that came to mind. + +00:54:47.160 --> 00:54:50.360 +And I'm going to take the privilege +of being the person asking the questions. + +00:54:50.640 --> 00:54:54.200 +I mean, the focus of most of AI innovation + +00:54:54.200 --> 00:54:58.400 +has been on replicating and potentially +replacing human intelligence + +00:54:58.800 --> 00:55:05.080 +as opposed to augmenting or thinking +about other forms of intelligence. + +00:55:05.080 --> 00:55:11.320 +And I wonder whether the I mean, +our experiences in assistive technology + +00:55:11.320 --> 00:55:15.520 +and how technology +can become an accompaniment + +00:55:15.520 --> 00:55:19.080 +or an augmentation +rather than a replacement + +00:55:19.600 --> 00:55:22.840 +might have some insights to give in this + +00:55:23.480 --> 00:55:26.920 +improvement of digital inclusion. + +00:55:26.920 --> 00:55:29.080 +Yeah, +I think you're you're absolutely right. + +00:55:29.080 --> 00:55:34.520 +It's it's human +AI cooperation and collaboration + +00:55:34.760 --> 00:55:39.160 +that's going to +get us the best results. And + +00:55:43.000 --> 00:55:45.440 +the the + +00:55:45.640 --> 00:55:47.280 +language the language models + +00:55:47.280 --> 00:55:51.080 +that we have, +the promise that they have of more interactive + +00:55:51.120 --> 00:55:53.640 +dialog like interactions + +00:55:54.120 --> 00:55:57.280 +are, you know, heading + +00:55:57.280 --> 00:55:59.320 +in a direction they're going to support + +00:55:59.920 --> 00:56:03.920 +much more natural human AI dialog + +00:56:03.920 --> 00:56:06.920 +and accessibility is such a complex + +00:56:06.920 --> 00:56:09.960 +topic where + +00:56:10.120 --> 00:56:11.920 +it's not always obvious + +00:56:11.920 --> 00:56:14.840 +what I'm trying to convey with this image. 
+ +00:56:14.840 --> 00:56:18.000 +How important is +is the thing, you know, it's not it's + +00:56:18.000 --> 00:56:20.360 +not necessarily easy to + +00:56:21.960 --> 00:56:24.120 +to decide what exactly is the + +00:56:25.480 --> 00:56:28.400 +correct alternatives for for something + +00:56:28.400 --> 00:56:30.720 +or there's plenty of other examples + +00:56:32.200 --> 00:56:34.920 +where a + +00:56:35.640 --> 00:56:39.480 +the combination of an AI +that has been trained on + +00:56:39.960 --> 00:56:44.160 +some of the general principles +of of good accessibility practice + +00:56:44.400 --> 00:56:48.040 +and a person who may not be as familiar +but really understands the domain + +00:56:48.040 --> 00:56:50.720 +and the context +of this particular application. + +00:56:51.160 --> 00:56:53.640 +It's when you put those +two things together + +00:56:54.400 --> 00:56:56.920 +that, things are going to start to work + +00:56:57.080 --> 00:57:02.600 +so AI can support the person, +not replace the person. + +00:57:02.600 --> 00:57:03.760 +And of course, + +00:57:05.280 --> 00:57:06.000 +the the + +00:57:06.000 --> 00:57:10.280 +one issue that we need to thorny issue +that we need to overcome + +00:57:10.520 --> 00:57:14.120 +with respect to AI, is that + +00:57:14.720 --> 00:57:18.440 +the challenge of addressing +more qualitative + +00:57:18.440 --> 00:57:23.040 +non quantitative values and ideas etc., + +00:57:23.920 --> 00:57:27.240 +so that it'll be interesting +to see what happens there. + +00:57:28.800 --> 00:57:29.440 +Yeah. Yeah. + +00:57:29.440 --> 00:57:33.200 +I thought Yeliz +had a very good suggestion this morning of + +00:57:33.760 --> 00:57:35.760 +perhaps we should pay attention to + +00:57:36.920 --> 00:57:39.320 +how people are making these judgments, +how the AI, + +00:57:40.040 --> 00:57:42.680 +how the accessibility experts + +00:57:42.680 --> 00:57:45.840 +make these judgments, +what are the principles and, + +00:57:45.960 --> 00:57:49.160 +and can we articulate those better + +00:57:49.480 --> 00:57:52.040 +than... than we do + +00:57:52.600 --> 00:57:56.440 +now and communicate those better to. + +00:57:56.480 --> 00:57:57.080 +Right. + +00:57:57.080 --> 00:58:02.800 +And there's been this notion of thick data +which includes the context + +00:58:02.800 --> 00:58:07.160 +because frequently we've isolated the data +from the actual context + +00:58:07.480 --> 00:58:11.400 +and many of these things +are very contextually bound. + +00:58:11.400 --> 00:58:14.560 +And so do you see that there might be + +00:58:14.920 --> 00:58:17.920 +a reinvestigation of the + +00:58:18.680 --> 00:58:22.800 +where that data came from, +what the context of the data was, etc.? + +00:58:24.200 --> 00:58:27.560 +I think there might be a + +00:58:28.040 --> 00:58:30.800 +a rise in methods that that + +00:58:32.600 --> 00:58:35.200 +bring in the whole context, + +00:58:35.200 --> 00:58:38.120 +bring in more of the context, +multimodal inputs. 
+ +00:58:38.720 --> 00:58:43.000 +Do you know even even for + +00:58:44.360 --> 00:58:48.280 +speech recognition +it it's doing what it does + +00:58:48.280 --> 00:58:52.200 +without even really knowing the domain +that it's working in + +00:58:53.400 --> 00:58:56.760 +and and that's pretty mindblowing +really think + +00:58:57.440 --> 00:59:02.400 +but when when it breaks down +is when there are technical terms + +00:59:02.400 --> 00:59:05.760 +when you're talking about a domain that is + +00:59:07.480 --> 00:59:09.200 +less frequently talked about, + +00:59:09.200 --> 00:59:12.400 +less represented and + +00:59:12.400 --> 00:59:15.200 +bringing in that domain knowledge + +00:59:15.200 --> 00:59:17.400 +I think is going to be huge. + +00:59:17.760 --> 00:59:21.680 +And similarly in terms of, of + +00:59:22.720 --> 00:59:26.640 +helping to create text alternatives +for things, + +00:59:26.640 --> 00:59:31.160 +the domain knowledge will will help to + +00:59:33.000 --> 00:59:36.560 +give a better +kind of base suggestion from the AI. + +00:59:36.920 --> 00:59:39.480 +And perhaps with dialog + +00:59:40.120 --> 00:59:45.440 +we can prompt people with +the right questions to help them decide + +00:59:46.080 --> 00:59:48.600 +is this +is this actually a decorative image + +00:59:48.600 --> 00:59:51.760 +or is it important for me to describe +what's in this image? + +00:59:51.760 --> 00:59:56.280 +You know, that's not actually always +a trivial question to answer. + +00:59:56.280 --> 00:59:59.720 +And of course, that brings in the issue +of classification and labeling + +01:00:00.280 --> 01:00:04.960 +and the need to box or classify +specific things. + +01:00:04.960 --> 01:00:08.680 +And many of these things are very fuzzy +contexts + +01:00:08.680 --> 01:00:13.160 +and classifiers are also determined +hierarchically. + +01:00:13.160 --> 01:00:15.200 +And there's yes. + +01:00:15.800 --> 01:00:20.960 +So yeah, maybe we don't need a perfect +classifier, but we need + +01:00:22.280 --> 01:00:26.760 +a good dialog where the, + +01:00:27.080 --> 01:00:29.520 +the, the system knows what questions +to ask + +01:00:30.040 --> 01:00:32.520 +to help the person decide. + +01:00:32.520 --> 01:00:34.160 +Right. + +01:00:34.160 --> 01:00:37.280 +And oh, and I just saw a message from + +01:00:37.280 --> 01:00:41.720 +Carlos saying to end the discussions + +01:00:42.440 --> 01:00:45.600 +and Carlos, I'm wondering +can we fit in one more question. + +01:00:46.520 --> 01:00:49.960 +I actually have to stop +at the top of the hour. So. + +01:00:50.640 --> 01:00:51.920 +Okay. + +01:00:52.560 --> 01:00:56.960 +So we will have an opportunity +to answer the questions + +01:00:56.960 --> 01:01:00.240 +that people have submitted in the question +and answer dialog. + +01:01:00.920 --> 01:01:03.360 +And we have access to those. So + +01:01:04.440 --> 01:01:06.600 +Shari will be able to respond + +01:01:06.920 --> 01:01:09.360 +to some of these additional questions +that have been asked + +01:01:10.960 --> 01:01:15.480 +and and apologies +that we went a little over time. + +01:01:15.480 --> 01:01:17.960 +Carlos. Okay. Turn it back over to you. + +01:01:18.520 --> 01:01:20.560 +Okay. Thank you. Thank you, Shari. + +01:01:20.840 --> 01:01:23.040 +Thank You. Thank you + +01:01:23.040 --> 01:01:23.480 +Thank you. + +01:01:23.480 --> 01:01:24.560 +Shari and Jutta. + +01:01:24.560 --> 01:01:27.280 +It was I was loving this discussion. + +01:01:27.880 --> 01:01:31.960 +So it's really unfortunate that we +we have stop now. 
+ +01:01:32.880 --> 01:01:35.320 +But thank you. Thank you so much for + +01:01:37.400 --> 01:01:38.640 +your presentations. + +01:01:38.640 --> 01:01:40.280 +Thank you all. + +01:01:40.280 --> 01:01:43.880 +Thank you +also to to all the panelists yesterday + +01:01:43.880 --> 01:01:46.640 +and today for making this + +01:01:47.840 --> 01:01:49.040 +a great symposium. + +01:01:49.040 --> 01:01:52.400 +Lots of interesting +and thought provoking ideas. + +01:01:54.320 --> 01:01:56.400 +And thank you all for attending. + diff --git a/pages/about/projects/wai-coop/symposium2.md b/pages/about/projects/wai-coop/symposium2.md new file mode 100644 index 00000000000..ee7a32f3617 --- /dev/null +++ b/pages/about/projects/wai-coop/symposium2.md @@ -0,0 +1,916 @@ +--- +title: "Artificial Intelligence (AI) and Accessibility Research Symposium January 2023" +title_html: "Artificial Intelligence (AI) and Accessibility Research Symposium January 2023" +nav_title: Symposium 2 +lang: en + +permalink: /about/projects/wai-coop/symposium2/ +ref: /about/projects/wai-coop/symposium2/ + +footer: > +Alternate videos and transcripts: For those who cannot access YouTube, the videos are also available from the W3C media server as mp4 files. The transcripts together are available in day 1 transcripts and day 2 transcripts.
+Date: Updated 30 August 2023.
+Editors: Letícia Seixas Pereira and Carlos Duarte. Contributors: Kevin White, Shawn Lawton Henry, and participants of the APA WG.
+Developed under the Research Questions Task Force (RQTF) of the Accessible Platform Architectures Working Group (APA WG). Developed as part of the WAI-CooP project, co-funded by the European Commission.
+--- + +{::nomarkdown} +{% include toc.html type="start" title="Page Contents" class="full" %} +{:/} + +{::options toc_levels="2" /} + +- The TOC will replace this text. +{:toc} + + +{::nomarkdown} +{% include toc.html type="end" %} +{:/} + +## Introduction +{:#introduction} + +Researchers, practitioners, and users with disabilities participated in an international online symposium exploring the positive and negative impacts of artificial intelligence (AI) in digital accessibility. + +This online symposium took place on 10 and 11 January 2023 and brought together researchers, academics, industry, government, and people with disabilities, to explore one of the most pressing emerging technologies, artificial intelligence. The symposium aimed to identify current challenges and opportunities raised by the increasing use of AI regarding digital accessibility and explore how ongoing research can leverage and hinder digital accessibility. + + +## Opening Keynote: Jutta Treviranus + +**Jutta Treviranus** is the Director of the [Inclusive Design Research Centre (IDRC)](http://idrc.ocadu.ca) and professor in the faculty of Design at OCAD University in Toronto. Jutta established the IDRC in 1993 as the nexus of a growing global community that proactively works to ensure that our digitally transformed and globally connected society is designed inclusively. Dr. Treviranus also founded an innovative graduate program in inclusive design at OCAD University. Jutta is credited with developing an inclusive design methodology that has been adopted by large enterprise companies such as Microsoft, as well as public sector organizations internationally. In 2022 Jutta was recognized for her work in AI by [Women in AI](https://www.womeninai.co/) with the [AI for Good - DEI AI Leader of the Year award](https://www.womeninai.co/copy-of-na-media-1). + +### First, Do No Harm +In this symposium, Jutta Treviranus delivers an opening keynote that sheds light on the harms of AI specific to people with disabilities. She addresses ethical concerns surrounding AI, such as lack of representation, human bigotry, manipulative practices, unfair value extraction, exploitation, and disinformation. Jutta emphasizes the significance of considering the impact of AI on people with disabilities, as they are often at the margins of justice-deserving groups, making them more vulnerable to both existing and emerging harms. She discusses the increasing complexity of decision-making processes and the growing appeal and usefulness of AI decision tools. Jutta also highlights the challenges of data diversity, predictive accuracy, data privacy, and the need for transparency in data usage. The discussion expands to include topics such as ethics, bias, and the efforts being made globally to address AI ethics. Jutta concludes by exploring the potential of AI and the opportunity it presents to reassess what we want to automate, what we mean by concepts such as best, optimal, and fairness, and how we can include marginalized individuals in the development and use of AI technologies. + +>> CARLOS DUARTE: People are still joining, but it's about time we start. We are delighted that you are able to + join us in this Artificial Intelligence and Accessibility Research Symposium. We are looking forward to a couple of + days of stimulating presentations and discussions. My name is Carlos Duarte and, on behalf of the whole organizing + team, I would like to offer you our warmest welcome.
Let me just take a moment to say a big thank you to the + wonderful people that made this happen, my colleague here at the University of Lisbon, Letícia Seixas Pereira, and a + group of people from the W3C Architecture Working Group, and those from the W3C Accessibility Initiative.
+Before we get going, just a couple of important reminders. By taking part in this symposium you agree to follow the + W3C code of ethics and professional conduct and ensure to promote a safe environment for everyone in this meeting. + Also, this session is being video recorded and transcribed. The transcription will be posted on the symposium + website later. If you object to being transcribed, we ask you to refrain from commenting.
+I would also like to take this opportunity to thank the European Commission that funds the WAI-CooP project + through the Horizon 2020 program.
+Now let me describe some of the logistics of this meeting. Audio and video are off by default. Please turn them on + only if requested and turn them off again when no longer needed. During the keynote presentations and the panel + discussion, you can enter your questions using the Q&A feature of Zoom. Speakers will monitor the Q&A and they might + answer your questions either live if time allows it, or directly in the Q&A system. You can use the chat feature to + report any technical issues you are experiencing. We will monitor the chat and try to assist you if needed. If + during the seminar your connection drops, please try to reconnect. If it is the whole meeting that's disrupted, you + won't be able to reconnect. We'll try to resume the meeting for a period of up to 15 minutes. If we're unsuccessful, + we will contact you by email with further instructions.
+As I mentioned before, this symposium is one of the results of the WAI-CooP project. That project started in + January of 2021 and will run until the end of this year. This is the second symposium, which means that we will still + have another symposium later this year. The main goal of the WAI-CooP project is to support the implementation of + international standards for digital accessibility. It aims to achieve this goal from various perspectives. It will + provide different overviews of accessibility-related resources, including tools or training resources. It will + develop actions like this one to promote collaboration between research and development players, and it is creating + opportunities for the stakeholders in this domain to exchange their best practices through, for example, a series of + open meetings.
+As I just mentioned, this is the second of three symposiums that will be organized by the WAI-CooP project. This + symposium aims to identify current challenges and opportunities raised by the increasing use of AI regarding digital + accessibility and to explore how ongoing research can leverage and hinder digital accessibility. I'll now + finish by introducing you to today's agenda. We will start with a keynote by Jutta Treviranus. This will be followed + by our first panel, which will focus on the use of computer vision techniques in the scope of accessibility of + media resources. Before the second panel, we'll have a 10-minute coffee break. The second, and last, panel of + today will also address the accessibility of media resources, but now from the perspective of natural language + processing.
+Now let's move to the opening keynote, for which we're delighted to welcome Jutta Treviranus. Jutta Treviranus is + the director of the Inclusive Design Research Centre and a professor in the faculty of design at OCAD + University in Toronto. The floor is yours.
+ +>> JUTTA TREVIRANUS: Thank you, Carlos. It is a great pleasure to be able to talk to you about this important + topic. I am going to just start my slides. I'm hoping that what you see is just the primary slide, correct?
+>> CARLOS DUARTE: Correct.
+>> JUTTA TREVIRANUS: Wonderful. Okay. Thank you, everyone. I will voice my slides and the information and the + images. I have titled my talk First, Do No Harm. I'm usually a really optimistic person, and I'm hoping to provide an + optimistic message.
++ To realize the benefits of AI, I believe we need to further recognize and take into account the harms. I'm going to + limit my discussion to the harms that are specific to People with Disabilities. There is a great deal of work + detailing the ethical concerns of currently deployed AI from lack of representation to human bigotry, finding its + way into algorithms to manipulative practices, unfair value extraction and exploitation and disinformation. I'll + focus on accessibility and disability, including the recognition that disability is at the margins of all other + justice deserving groups, therefore, most vulnerable to the general and emerging harms, but also the potential + opportunities of AI. + Carlos shared a number of questions and they're all great questions. We agreed this is better covered through a + conversation than a presentation. At the end of my talk, I'm going to invite Shari and we'll talk more about this + tomorrow after the book talk. +
+Our society is plagued by more and more difficulties. As the world becomes more and more complex and entangled, the choices increase in ambiguity, the risks associated with each decision become more consequential, and the factors to consider in each decision become more numerous, convoluted, and confusing. Especially in times of crisis, like we have been experiencing these last few years, and in highly competitive situations where there is scarcity, AI decision tools become more and more attractive and useful. As an illustrative example, it is no wonder that over 90% of organizations use some form of AI hiring tool, according to the U.S. Equal Employment Opportunity Commission. As work becomes less formulaic and finding the right fit becomes more difficult, they are a highly seductive tool. As an employer, when choosing who to hire from a huge pool of applicants, what better way to sift through, find the gems, and eliminate the potential failed choices than to use an AI system? With an AI tool making the decisions, we remove the risks of conflicts of interest and nepotism. What better way to determine who will be a successful candidate than to use all of the evidence we have gathered from our current successful employees, especially when the jobs we're trying to fill are not formulaic and there is not a valid test we can devise for candidates to determine their suitability? AI can use predictive analytics to find the optimal candidates.
+In this way, we're applying solid, rigorous science to what would otherwise be an unscientific decision; we're not relying on fallible human intuition. Tools are adding information beyond the application to rule out falsehoods in the applications; after all, you never know, there are so many ways to fake a work history or a cover letter, or to cheat in academia. The AI hiring tools can verify through gleaned social media data and information available on the web, or through networked employment data. After all, employees have agreed to share this as part of the conditions of employment, and other employers have agreed as a condition of using the tool. If that is not enough, AI-administered and processed assessments can be integrated. The tools are going beyond the practical and qualitatively determinable capacity of candidates to finding the best fit culturally, to make sure that the chosen candidates don't cause friction but integrate comfortably. The tools will even analyze data from interviews to find a socio-emotional fit of candidates. If that's not satisfactory, the employer can tweak the system to add factors like a favored university or an ideal persona, or pick an ideal employee as a model, and the systems are getting better and more sophisticated at finding a match. The same system can then guide promotion and termination, ensuring consistency of employment policies.
+So what's wrong with this? Science, math, statistical reasoning, efficiency, accuracy, consistency, better and more accurate screening for the best fit of the scientifically determined optimal employee, accurate replication and scaling of a winning formula; it is a very seductive opportunity. What could be wrong? For the employing organization, we have a monoculture, recreating and growing the successful patterns of the past. With more data and more powerful analysis, the intended target becomes more and more precise. The employer finds more and more perfect fits. What's wrong with that? For the organization, what happens when the context changes? When the unexpected happens, a monoculture doesn't offer much adaptation, flexibility, or alternative choices.
+As a visual description, I have an image showing what happened to cloned potatoes in a blight that was survived by a diverse crop. Of course, we have diversity, equity and inclusion measures to compensate for discriminatory hiring and increase the number of employees from protected, underrepresented groups. Even there, there will be an even greater rift between the monoculture and the candidates hired through diversity and equity programs. What happens to the candidate with a disability who would otherwise be a great fit for doing the job when judged by these hiring systems? When AI is analyzing, sorting, and filtering data about a large group of people, what does disability look like? Where is disability in a complex, entangled, adaptive, multivariate dataset? Self-identification is often disallowed and many people don't self-identify; even if we had a way to identify it, the definition and boundaries of disability are highly contested. Disability statisticians are acutely aware of some of the challenges. In any normal distribution, someone with a disability is an outlier. The only common data characteristic of disability is difference from the average, the norm. People with Disabilities are also more diverse from each other than people without disabilities. Data points in the middle are close together, meaning that they are more alike; data points at the periphery are further apart, meaning that they're more different from each other. Data regarding people living with disabilities are spread the furthest in what I call the starburst of human needs. As a result of this pattern, any statistically determined prediction is highly accurate for people that cluster in the middle, inaccurate moving away from the middle, and wrong as you get to the edge of the data plot.
+Here I'm not talking about AI's ability to recognize and translate things that are average or typical, like typical speech or text, or to translate from one typical language to another, or to label typical objects in the environment, or to find the path that most people are taking from one place to another. Even there, in these miraculous tools we're using, if we have a disability, if the speech is not average, if the environment you're in is not typical, AI also fails. Disability is the Achilles' heel of AI. In applying statistical reasoning to disability, you have the combination of diversity, variability, the unexpected, complexity and entanglement, and the exception to every rule or determination. AI systems are used to find applicants that match predetermined optima, with large datasets of successful employees and hires. The system is optimizing the successful patterns of the past; all data is from the past. The analytical power tool is homing in on and polishing the factors that worked before, and we know how much hiring of people with disabilities there was in the past. The tool is built to be biased against difference: different disabilities, different ways of doing the job, different digital traces, different work and education histories, different social media topics, an entangled profile of many differences.
+As AI gets better or more accurate in its identification of the optima, AI gets more discriminatory and better at eliminating applicants that don't match the optima in some way. The assumptions the AI power tools are built on: scaling and replicating past success will bring about future success; optimizing data characteristics associated with past successes increases future successes. The data characteristics that determine success need not be specified or known to the operators of the AI or the people who are subject to the decisions, and the AI cannot, at the moment, articulate the highly diffuse, possibly non-adaptive reasons behind the choices. Current AI systems cannot really explain themselves or their choices, despite the emergence of explainable AI. How many of you have experienced tools like Microsoft's and other similar tools that purport to help you be more efficient and productive by analyzing your work habits? The surveillance systems provide more and more granular data about employment, providing intelligence about the details of the average optimal employee. The result of the AI design is that the optimum will not be a person with a disability. There are not enough successfully employed Persons with Disabilities, but it is more than data gaps; even if we had full representation of data from Persons with Disabilities, there would not be enough consistent data regarding success to reach probability thresholds. Even if all data gaps are filled, each pattern will still be an outlier or minority, and will lack probabilistic power in the algorithm. The same pattern is happening in all life-altering difficult decisions. AI is being applied and offered to competitive academic admissions departments, so you won't get admitted; to beleaguered health providers, in the form of medical calculators and emergency triage tools, resulting in more death and illness if you're different from your classification; to policing; to parole boards; to immigration and refugee adjudications; to tax auditors, meaning more taxpayers with disabilities are flagged; to loan and mortgage officers, meaning people with unusual asset patterns won't get credit; to security departments, meaning outliers become collateral damage.
+At a community level we have evidence-based investment by governments, AI guiding political platforms, public health decisions, urban planning, emergency preparedness, and security programs. None will decide in favor of the marginalized outlier; the outliers will be marked as security risks. These are monumental, life-changing decisions, but even the smaller, seemingly inconsequential decisions can harm by a million cuts: what gets covered by the news, what products make it to the market, the recommended route provided by GPS, the priority given to supply chain processes, what design features make it to the market.
+Statistical reasoning that's inherently biased against difference from the average is not only used to apply the + metrics, but to determine the optimum metrics. This harm predates AI. Statistical reasoning as the means of making + decisions does harm. It does harm to anyone not like the statistical average or the statistically determined optima. + Assuming that what we know about the majority applies to the minority does harm. Equating truth and valid + evidence with singular statistically determined findings or majority truth does harm. AI amplifies, accelerates and + automates this harm. It is used to exonerate us of responsibility for this harm. +
+We have even heard a great deal about the concern for privacy. Well, people with disabilities are the most vulnerable to data abuse and misuse. De-identification does not work: if you're highly unique, you will be re-identified. Differential privacy will remove the helpful data specifics that you need to make the AI work for you and your unique needs. Most People with Disabilities are actually forced to barter their privacy for essential services. We need to go beyond privacy, assume there will be breaches, and create systems to prevent data abuse and misuse. We need to ensure transparency regarding how data is used, by whom, and for what purpose. It is wonderful that the E.U. is organizing this talk, because the E.U. is taking some wonderful measures in this regard.
+Wait, we're talking about a great number of harms. Haven't we developed some approaches, some solutions to this? Don't we have auditing tools that detect and eliminate bias and discrimination in AI? Don't we have some systems that certify whether an AI is ethical or not? Can't we test tools for unwanted bias?
+Unfortunately, AI auditing tools are misleading in that they don't detect bias against outliers and small minorities, or anyone who doesn't fit the bounded groupings. Most AI ethics auditing systems use cluster analysis, comparing the performance regarding a bounded justice-deserving group with the performance for the general population. There is no bounded cluster for disability. Disability means a diffuse, highly diverse set of differences. Those AI ethics certification systems, and the industry that is growing around them, raise the expectation of ethical conduct, that the problem has been fixed, making it even more difficult for the individual to address harm. Many fall prey to cobra effects, or the unintended consequences of oversimplistic solutions to complex problems, or to linear thinking, falling into the rut of monocausality where the causes are complex and entangled.
+There is some helpful progress in regulatory guidance. One example is the U.S. Equal Employment Opportunity Commission, which has developed The Americans with Disabilities Act and the Use of Software, Algorithms, and Artificial Intelligence to Assess Job Applicants and Employees; it is a very long title. Much of the guidance focuses on fair assessments or tests and accommodation, not on the filtering out of applicants before they're invited to take an assessment, or by employers who don't use assessments. The data-related suggestion is to remove the disability-related data that is the basis of disability discrimination. What we found is that the data cannot be isolated. For example, an interrupted work history will have other data effects and markers, making it hard to match the optimal pattern even when it is removed.
+For the ethical harms that are common to a whole group of marginalized individuals, there are numerous AI ethics efforts emerging globally. We have tried to capture the disability-relevant ones in the We Count project. This includes standards bodies creating a number of standards that act as guidance, government initiatives that are looking at the impact of decisions made using automated decision tools, academic research units that are looking at these effects, and others. We have found that disability is often left out of the considerations or the ethics approaches. As the questions that were submitted indicated, we're at an inflection point, and this current inflection point reminds me of the book The Axemaker's Gift, by Burke and Ornstein. They wanted us to be aware of the Axemaker's Gifts. Each time there was an offering of a new way to cut and control the world, to make us rich or safe or invincible or more knowledgeable, we accepted the gift and used it, and we changed the world, we changed our minds; each gift redefined the way we thought, the values by which we lived, and the truths for which we died.
+But to regain my optimism: even AI's potential harm may be a double-edged sword. The most significant gift of AI is that it manifests the harms that have been dismissed as unscientific concerns. It gives us an opportunity to step back and reconsider what we want to automate or what we want to accelerate. It makes us consider what we mean by best, by optimal, by truth, democracy, planning, efficiency, fairness, progress, and the common good.
+Some of the things we have done within my unit to provoke this rethinking include our inverted word cloud, a tiny little mechanism. A conventional word cloud increases the size and centrality of the most popular or statistically frequent words; the less popular, outlying words decrease in size and disappear. We have simply inverted that behavior: the novel, unique words go to the center and grow in size. We have also been trying to provoke and to indicate with models, like the lawnmower of justice, where we take the top off of the Gaussian, or the bell curve as it may be called, to remove the privilege of being the same as the majority. The model needs to pay greater attention to the breadth of data. We're exploring bottom-up, community-led data ecosystems where the members govern and share in the value of the data. This fills the gap left by things like impact investing, for example, when social entrepreneurship efforts that are supposedly addressing these problems can't scale a single impactful formula sufficiently to garner support. It also works well to grow knowledge of things like rare illnesses that won't garner a market for treatments and therefore are not invested in.
+We're creating tools to reduce harm by signaling when a model will be wrong or unreliable because the evidence-based guidance is wrong for the person being decided about. Here we're using a tool, the dataset nutrition label, that gives information about what data is used to train the model.
+Back to the Axemaker's Gift and the opportunity to reconsider where we're going. From a complexity perspective, we're collectively stuck on a local optimum, unable to unlearn our fundamental assumptions and approaches to find the global optimum. I believe there is a global optimum. At the moment, as a society, we believe, or we act as though, to succeed we need to do what we have been doing more effectively, efficiently, accurately, consistently. We're hill climbing, optimizing the patterns of the past, eroding the slope for anyone following us. We need to stop doing the same things more efficiently and potentially reverse course.
+I have been considering the many local optima we keep hill climbing: not just statistical reasoning finding a single winning answer, not just winner-takes-all, zero-sum-game capitalism and growth at all costs, but majority rules, all-or-nothing decisions. And even in our community, the accessibility community, the notion of a single checklist of full accessibility for a group of hugely diverse people, many of whom are not represented when coming up with the list.
+The people closest to the bottom are more diverse, closest to the path we need to follow to find the global optimum, and less invested in current conventions. We need to diversify and learn to use our complementary skills, and learn from people who are currently marginalized, even in this community focused on accessibility. If anyone knows, we know that it is at the margins, the outer edge of our human starburst, that we find the greatest innovation and the weak signals of crises to come. This is where you feel the extremes of both the opportunities and the risks. One of the emerging uncertainties that holds both great opportunities and risks is generative AI.
+What are the implications if you have a disability? What will it do for accessibility? I'm sure you have heard about tools like ChatGPT, Stable Diffusion, various versions of DALL-E, Midjourney and other tools; even today there are announcements of new tools. They don't rely purely on statistical reasoning, they can transfer learning from context to context, and they use new architectures called transformers that can pivot to new applications. They can also create convincing, toxic lies. People with Disabilities tend to be most vulnerable to the misuse and abuse of such tools.
+I'm going to invite Shari to help me address the emerging possibilities.
+>> SHARI TREWIN: Hello, everybody. I'm Shari Trewin, from Google, a middle-aged white woman with a lot of smile lines on my face.
+So there's a lot to think about there! I wonder if we might start off where you ended, talking a little bit about generative AI models and language models. They're trained on a large amount of data that may not reflect the moral values that we would like our models to incorporate. One question I think would be interesting for us to talk about is: can we teach these large language models, or generative AI, to apply these moral values, even though the very large datasets may not represent them?
++ >> JUTTA TREVIRANUS: That's a great question. Thinking of how that might be done, one of the dilemmas is that we may need to find a way to quantify abstract qualitative values. In that process, will that reduce these values? Deep learning lacks judgment, the human sort of value, the human judgment that isn't quantitative. Perhaps one way to start is by recognizing human diversity and the diversity of context. There is a lot of talk about individualizing applications without making the cost exorbitant to the people that need them. The irony, of course, is that the people that need that type of individualization the most are the most likely to be the people that can't afford it. It is not yet known, can we do that? Of course, there have been surprising advances in all sorts of different areas with respect to AI and generative AI, but I think the issue of values and shared values, and their articulation, and making them “mechanizable”, because, of course, we're talking about a machine, and the recognition of values that we have difficulty even fully expressing, is quite a challenge. What do you think, Shari?
+>> SHARI TREWIN: It is a good point: can we express, or can we measure, whether a model meets our values, or whether we think it is free from bias, or as free from bias as we can make it? Do we know how to evaluate that? I think that is an important question. Some of the steps that often get missed when creating a system that uses AI, and that may help with that, would be starting off from the beginning by thinking about who are the people who may be at risk, what are the issues that might be in the data, what historical biases may that data represent or include, and then actively working with members of those communities to understand how we are going to measure fairness here. How are we going to measure bias, what's our goal, how will we test, how will we know when we have achieved our goal? I think there is some progress that could be made in the design process and in thinking about the larger system that we're embedding AI in. Everything doesn't have to be built into the one AI model; we can augment models, build systems around models, taking into account their limitations, and create a better overall whole system.
++ >> JUTTA TREVIRANUS: Thinking about what the models are currently trained on and the masses of data used to build the models, the training data is rife with discrimination against difference, right? How do we, how do they, unlearn? It matches some of the training that I do within my program, in that students have been socialized with very similar things, and often the issue is not learning, the issue is unlearning. How do you remove those unconscious, habituated values that are so embedded in our learning systems? I agree, it is a huge opportunity, especially with more context-aware systems. Maybe what we need to pursue, even to address things like privacy and the need to swim against this massive amount of data that's not applicable to you, is an on-device, individualized system (not personalized, because personalization is a term that's also sort of been hijacked to mean cushioning, so let's use the term individualized) that takes your data and creates a bottom-up picture of what's needed.
++ >> SHARI TREWIN: There are definitely interesting avenues to explore with transfer learning: to take a model that's been trained on data and has learned some of the concepts of the task that we want, but maybe we would like it to unlearn some of the things that it has learned. Can we use techniques like transfer learning to layer on top and unteach the model, and direct the model more in the direction that we want? I think the hopeful thing about that is that it needs magnitudes less data to train such a model. That makes it a little more achievable, a little less daunting for the community to take on.
+Do we think that current regulatory systems are up to the task of regulating current and emerging AI and preventing the kinds of harms you have been talking about?
++ >> JUTTA TREVIRANUS: No. (Laughter). No, for a simple answer, I don't think so. There are so many issues. Laws and policies are developed at a much slower pace. We're dealing with an uncertain, very, very quickly moving, quickly adapting area. Laws, well, they need to be testable. In order to be testable, we have to create these static rules that can be tested, which means we have to be fairly specific as opposed to general and abstract. That tends to lead us towards one-size-fits-all criteria, which we know are not great if we're trying to design for diversity or encourage diversity. I think one of the things we need to innovate is the regulatory instruments that we can use here. What's your thinking about this?
++ >> SHARI TREWIN: Yeah. I think some of these regulatory instruments that we have do apply. If you're a company that is using an AI system in screening job applicants, the disability discrimination laws still apply to you: somebody could still bring a lawsuit against you saying that your system discriminated against them, and you're still liable to defend against that and to watch out for those kinds of issues. In some ways, there are important pieces already in place that can be used to tackle problems introduced when AI systems are introduced. In other ways, there is a little more of a gray area when the technology is not making discriminatory decisions but still may make harmful mistakes, or mislead people who are relying on it. You know, if anybody here has a legal background, I would love to hear their take as well on how well the current consumer protections apply, for example, if you're using any of these tools.
+>> JUTTA TREVIRANUS: I have become aware of and worried about the people for whom the law isn't adequate. The fact that we have a law, the fact that we supposedly have measures that prevent abuse or unethical practice, if you are still being treated unethically, makes it even harder for you. I think that the measures that we do have, the regulations that we do have, have to have some way of continuously being iterated upon so that we can catch the individuals that are not included. We have to recognize that our “supposed” solutions are actually not solutions, that this is never fixed, that it requires ongoing vigilance. Yeah. There is much more to say about that. Yes. It would be great to hear from anyone with a legal background.
++ >> SHARI TREWIN: Let's talk a little bit more about generative AI; it was mentioned at the end there. It produces very convincing statements when asked a question, but it also, very plausibly, completely makes things up and isn't always reliable. In fact, right now, it is not connected to any form of ground truth, or able to assess the accuracy of what it generates. One question that I think is interesting is: will this technology eventually reach a stage where it can support the kinds of decisions that we are using statistical reasoning for now? Obviously, right now, it is not there yet.
++ >> JUTTA TREVIRANUS: It is interesting because just recently there have been announcements of systems being used for medical guidance, using large language models to come up with answers to your medical questions, which, of course, is quite… It will be interesting to see what happens.
++ >> SHARI TREWIN: Scary, I think. +
++ >> JUTTA TREVIRANUS: Exactly, scary. And what about the medical advice given to someone for whom, within the dataset that's provided, there isn't a lot of data? If you ask any of the LLMs, the chatbots, how confident they are in their answers, they'll answer that they are confident, because there isn't a sense of what the risk level or the confidence level of this particular response is; there is no self-awareness of what's wrong, what is right, what is the context in front of me.
++ >> SHARI TREWIN: That's a great opportunity there, to explore whether we can enable models to know better what they don't know. To know when the case that they're dealing with right now is not well represented in their models, or may be an outlier case that they should perhaps pass on to some other form of decision making, or at least convey less confidence about. You know, I think generative AI today gives us a glimpse of the future, the kind of interactions that are possible, the kind of ways we might interact with technology in the future. Clearly, there is a research priority to ground it better in truth, and it needs to be much more reliable, much more trustworthy, much more accurate. Today, it can't support those applications, and the idea of using it to get medical advice, it is just, that's a very scary thing. Because it is so eloquent that it is immediately trustworthy, and it gets enough things right that we begin to trust it very quickly. In some ways, the advances that have been made are so good that they really highlight the dangers more effectively.
++ I think it is interesting to think about what human-AI interaction would look like in the future. Would we need to train it to identify bias and kind of work with a large language model to adapt responses? Think about how automatic image description has evolved. At first, it would throw out words that might be in the picture; sometimes it was right, sometimes it would be wrong. Now you see these generated alternative texts being phrased in a way that conveys the uncertainty: “could be a tree”, or something like that. I think that the large language models could do something similar to reduce the chances of misleading people. They may say things like “many people seem to think blah, blah, blah”, or get better at citing sources. I think there are a lot of ways that we can use these, and direct research to overcome some of the obvious failings that are there right now, and other limitations that we currently have.
++ Mark has shared in the chat that, from the U.S. government regulatory side, much of the current laws or regulations about access to government services are about the technical accessibility of the interfaces, rather than the more AI-focused questions around system exclusion or mismatch. That comes back to our point about the regulatory instruments.
++ >> JUTTA TREVIRANUS: I just noticed that Mark says what a Debbie Downer my talk is. I think, by design, Shari and I decided that I would provide the warnings and Shari would provide the optimism.
++ >> SHARI TREWIN: I have the best job there. +
++ >> JUTTA TREVIRANUS: I think there are quite a few questions in the question and answer panel. Maybe what we should + do, there are so many things to explore with the emerging models and so many uncertainties, there are some great + questions there as well. +
++ >> SHARI TREWIN: Yeah. How about… they're jumping around on me. New questions. I know this is not in the right + order, but, as people are adding questions, they're kind of jumping. (Chuckle). +
++ So, Bruce Bailey is asking, he says fantastic keynote, please expound on personalization having been hijacked to + mean cushioning. I can guess, but that term and perspective is new to me. +
++ >> JUTTA TREVIRANUS: I can talk about that. A way that we recognize that we're all diverse, and especially if you have a disability, you are diverse from other People with Disabilities, and that our needs are therefore diverse, is to look at how we personalize. Personalization has been used as a term for using recommender engines, using various ways in which we're offered only information and recommendations from people like us, which, of course, removes any dissonance and any diverse thinking and our exposure to alternative views and perspectives. To some extent, it causes greater polarization, because we're also offered a personalized view of the current stance that we're taking, so that it gets confirmed again and again and again. I'm not talking about that type of personalization. I'm talking about the type of personalization where the interface makes it easier for us to participate and addresses our specific, very diverse requirements with respect to that participation. I moved away from the term personalization simply because I don't want it to be mistaken for the type of personalization that cushions us away from diverse perspectives, because certainly we need to be exposed to that diversity of perspectives, and we need to consider the diverse stories that people have.
++ >> SHARI TREWIN: I think personalization is an essential part of accessibility in general, but you were talking about a particular kind of personalization. On AI personalization, I'll talk a bit more in the keynote at the end about an example of personalized models that are permitting access to digital content, which I think shows what we lose when that kind of personalization isn't used.
++ Yeah, so Kave Noori from EDF says: thank you for this important keynote. I have seen different toolkits to test and mitigate bias in AI. What is your view on them and their usefulness?
++ >> JUTTA TREVIRANUS: We have been doing, actually, as part of a number of our projects, including ODD (Optimizing Diversity with Disability) and We Count, a look at a variety of AI ethics auditing tools, and we have also done sort of secret shopper testing of employment tools, seeing if we can detect the particular biases that come up, the unwanted biases; because, as we have made clear, the tools are intended to be biased, so it is the unwanted bias that is the proviso. What we find is that they're great at cluster analysis, and then they supplement the cluster analysis with a number of questions that are asked of the implementer of the system. The primary technical key to the tools is determining whether there is unfair treatment of one bounded group compared with another. That works well if you have something like determining whether there is discrimination regarding gender, or discrimination regarding declared race or language, those sorts of things, which do cluster well. None of the tools really detect whether there is discrimination based upon disability. Because the particular discriminating characteristics are so diffuse and different from person to person, we don't see how it is possible, from a litigation perspective or a regulatory perspective, to prove that you have been discriminated against. It is going to be very, very difficult to come up with that proof, because the particular characteristics are themselves so entangled and diffuse. It may not be one particular characteristic associated with your disability that you could use to say, well, look here, I'm being discriminated against because of this characteristic that relates to my disability.
++ >> SHARI TREWIN: I think a lot of the metrics in those toolkits are group fairness metrics, like you say, and that's an important thing to measure and to look at when we do have the ability to identify groups and to know for sure who's in which group. The boundaries of the groups are not always clear; you know, there's a deeply embedded assumption that there are only two genders, for example, in the data and in many of the tools, and they have their problems, and disability emphasizes these same problems. There are also individual fairness metrics and measures, and some of the toolkits include some of these kinds of measures. Instead of asking, is this group as a whole treated equivalently to this other group, they ask, are similar individuals treated similarly? You could imagine, with an approach like that, that if I as an individual, with my unique data, wanted to make a case that I was discriminated against, I could do so by creating another person who was similar to me in the respects that are important for this job, and seeing what kind of result they got compared to my result. That would be a way to measure individual fairness and build up a case.
++ >> JUTTA TREVIRANUS: Yeah. Yes. Unfortunately, there are not that many tools that currently do that. The certification systems that currently exist are not implementing those. There is much to work on there.
++ >> SHARI TREWIN: Yeah. It is more of a case by case basis for this particular job. It is not so easy to make a + blanket statement about it, but I think it is not impossible to assess. Do we have time for one more? How much + longer do we have? Another 3 minutes? +
++ >> CARLOS DUARTE: Well, you have almost 10 minutes more. You can definitely take one more. +
++ >> SHARI TREWIN: Awesome. Great. Let's see. So, Fabien Berger says: I feel that AI, but before it was KPIs or the like, is sought out by managers to justify their decisions or to run away from the responsibility of their decisions. It fulfills a need for them, but with a wrong, incomplete answer. Do you agree?
++ >> JUTTA TREVIRANUS: Yes. I think the issue, and I was trying to make that point but possibly not well enough, is that AI is doing much of what we have done before, but it is amplifying, accelerating, and automating those things. Certainly, AI can be used for confirmation bias, to find the specific justification for whatever it is that we need to justify, whether it is something good or something bad. A lot of the harms of AI already existed, because of course AI is learning from our past practices and our data. I have often used the analogy of a power tool: before, it was a practice that we did manually, so there was an opportunity to make exceptions, to reconsider, you know, is this actually what we want to do, to do something different. But with the power tool, it becomes this much more impactful thing, and there is less opportunity to craft the approach that we take.
++ >> SHARI TREWIN: I think that's why it is really important to try to design for outliers and to consider outliers. Again, I come back to this point of a system, the system as a whole, that includes AI. If we can't guarantee that the AI itself is going to give us the characteristics we want, then we have to design around that, and be mindful of that while we're designing. There is also, of course, the opportunity to try to clean up our data in general; in situations where we can identify problems or imbalances in the data, we should certainly tackle them. That's one other step, and I think there are many steps to fairness and to the ethical application of AI, and no one step is a magic solution to all of them. But if we stay aware of the risks and make sure that we're talking to the right people and involving them, then I think we can at least mitigate problems and better know the limits of the technologies that we're using.
++ >> JUTTA TREVIRANUS: I have been looking at some of the ethical questions that have come in. One of the discussions was about the Gaussian curve, or the Gaussian center. One point that I may not have made as clearly is that, in fact, there is a myth that we need to have a single answer at the very middle of the Gaussian curve, which, of course, matches our notion of majority rules as the way to decide amongst difficult decisions. An alternative to that is to address the very, very diverse edges initially and to prioritize those. Because what then happens is that it gives us room to change, it helps us to address the uncertainty, and it makes the whole design, or decision, or the options that are available, much more generous and, therefore, prepares us better for the vulnerabilities that we're going to experience in the future. Of course, I'm an academic; to say that statistical reasoning, evidence through scientific methods, is at fault is a fairly dangerous thing to say, especially during a time when truth is so much under attack. But I think what we need to do is not reduce truth to statistical reasoning, but to acknowledge that there are a variety of perspectives on truth and that we need to come up with one that addresses the people that we're currently excluding in our notions of truth.
++ >> SHARI TREWIN: There are two minutes left I think now. Maybe we can squeeze in one more question here. Jan + Beniamin Kwiek asks do you think that AI and big companies driving research on it can be problematic towards + societal issues that don't necessarily give the highest revenue? If so, how can it be fixed? +
++ >> JUTTA TREVIRANUS: Yeah. That's a huge question. Government efforts are basing their decision making on profit and economic progress and impact measures. I think one of the things that we need to abandon is this idea that a solution needs to be formulated and then scaled by formulaic replication. We need to recognize that there is a different form of scaling, by diversification, and that we need to contextually apply things. I mean, that's one of the lessons of indigenous cultures; what's labeled as colonialist is what many governments are in fact still implementing, even in things like social entrepreneurship. Yes, big companies, of course, are driven by profit. Is that the best approach to achieve the common good? That's a huge question.
++ >> SHARI TREWIN: It is a huge question. It would be a great one to come back to tomorrow in the symposium. Let's + come back to that one. I see we're out of time right now. Thank you very much, Jutta. +
++ >> JUTTA TREVIRANUS: Thank you. We'll have the positive tomorrow! (Laughter). +
++ >> CARLOS DUARTE: Thank you so much, Jutta and Shari, a great keynote, a very interesting follow up, great + discussion between you both. Also, there are still some open questions in the Q&A, if you feel like tackling them + now offline, feel free to do so. +
++ >> CARLOS DUARTE: Let's move on to our first panel. The topic for this panel will be computer vision for media accessibility. Here we aim to foster a discussion on the current state of computer vision techniques, focusing on image recognition and the identification and recognition of elements and text in web images and media, and considering all of the different usage scenarios that emerge on the web. We'll be looking at aspects such as how we can improve quality, how we define quality for this, the quality and accuracy of current computer vision techniques, and what the opportunities and future directions are in this domain.
++ We'll be joined by three panelists for this first panel: Amy Pavel, from the University of Texas; Shivam Singh, from mavQ; and Michael Cooper, from the W3C. Great. Everyone is online, sharing their videos. Thank you all for agreeing to join. Before your first intervention, I will ask you to give a brief introduction of yourself, to let people know who you are and what you're doing.
++ I would like to start with one of the issues around quality: how do we define quality here? I was looking at aspects such as how we can train AI models that are able to identify aspects in an image, such as identity, emotion, and appearance, which are particularly relevant for personal images. How can we get AI to do what we humans can do? I'll start with you, Amy.
++ >> AMY PAVEL: Excellent. Thank you so much. My name is Amy Pavel. I'm an assistant professor at UT Austin in the computer science department. I'm super excited to be here because a big part of my research is exploring how to create better descriptions for online media. I have worked on everything from social media, like describing images on Twitter, to new forms of online media like GIFs and memes, and I have also worked on video: educational videos, making the descriptions for lectures better, as well as entertainment videos, to improve the accessibility of user-generated YouTube videos, for instance.
++ I think this question you bring up is really important, and I typically think about it in two ways. I think about what our computer understands about an image, and then how we express what the computer understands about an image or other form of media. So, I think that we're getting better and better at having computers that can understand more of the underlying image. For instance, if we think about something like emotion, we have gotten a lot better at determining exact landmarks on the face and how they move, for instance, or we may be able to describe something specific about a person: if you look at me in this image, I have brown hair tied back into a bun and a black turtleneck on. This is the type of thing we might be able to understand using automated systems.
++ However, the second question is how we describe what we know about an image. If I gave you all of the information about my facial landmarks and what I'm wearing in every context, that may not be super useful. So a lot of what I think about is how we can best describe, or what people may want to know about, an image given its context and the background of the user. Just briefly on that point, I usually think about who is viewing this image and what they might want to get out of it. Also, who is creating it? What did they intend to communicate? These two questions, I think, give us interesting ideas on what data we could use to train on, to create better descriptions based on the context. For example, we might use descriptions that are actually given by people to describe their own images, or their identities, or aspects that they have shown in videos in the past. On the other hand, we might use a bunch of different methods and improve our ability to select a method based on the context of the image. For instance, when I worked on Twitter images, we would run things like captioning to describe the image; an image of a note may just say “note”. We also ran OCR to automatically extract the text, and tried to pick the best strategy to give people what we thought might be the best amount of information given the image. That's my initial answer; I'm sure more aspects of this will come up as we have a conversation. Yes.
++ >> CARLOS DUARTE: Thank you so much. Shivam, you want to go next? +
++ >> SHIVAM SINGH: Sure. Yeah. Hi, everyone. I'm Shivam Singh. I lead the document-based products at mavQ, India. It is a pleasure to be here with all of you. The question here is how we should train models dedicated to identifying aspects like identity, emotion, and personal appearance. That is a two-part answer.
++ I'm from more of a technical background, so I will go into a bit of technical detail here. Preparing the data with diversity in mind, that's the first point. Most available data is publicly available data. We can carefully plan and prepare the data before creating our models to include the weights for peripheral data of the surrounding environment; in an image, there can be a subject, and there can be a lot of peripheral data. If we choose an algorithm that takes care of that peripheral data as well, that will be helpful in getting a better output. For example, you have a subject gesturing, its relation with the environment, and the linking of emotion to its external manifestation in the subject's area. This gives a more inclusive output: if you have a user, a person, you get a better sense of identity, emotion, and appearance. And there should be a […] where we could have a diverse dataset, but it is not totally dependent on the availability of data.
++ The second part of it would be fine-tuning the model based on personal preferences. Let's say you have a bigger model; you use that as a general model, and then you can fine-tune it based on small-scale trainings and smaller datasets, and you can fine-tune it together to have a better result. Now, this fine-tuning is kind of a human-in-the-loop feature, where every time you get the data you can collect some feedback on it and then produce a better output. That's something which involves some human intervention. Yeah. That's how I see how we can train models.
++ >> CARLOS DUARTE: Great. Thank you, Shivam. Michael. +
++ >> MICHAEL COOPER: Hey. So my name is Michael Cooper, and I work with the Web Accessibility Initiative. I'm speaking specifically from my role there; I'm not a machine learning professional, so I'm not speaking about the technology so much as some considerations for accessibility that I'm aware of. In terms of improving the quality of descriptions, the other two speakers spoke about how, technically, we do it. I think we may be able to give advice on some of what needs to be done. For instance, machine learning output should be able to conform to the Media Accessibility User Requirements and the cognitive accessibility guidance, for instance, as sources of information about what will be useful to users.
+I'm also thinking of machine learning more broadly in terms of what tools might be used in different circumstances and, in particular, in the context of potential assistive technology. So the question for accessibility there is not just what the description of this image is, but what the image description in this page is for me, for the purpose I'm seeking. You know, tools can get context from HTML semantics, accessibility semantics like ARIA, and adaptive technology; they can also generate their own context from machine learning algorithms. I think there is going to be a need for a way to communicate user preferences to machine learning, whether that is added to the semantics or something else.
++ Let's see, just a couple of closing notes on that. Users need to be involved in the design and training process; that's something that needs to be repeated. You know, we have to pay attention to that as we look to improve these tools. I would also note that while this session is mainly focused on, you know, images and media, virtual and augmented reality have a lot of the same problems and solutions that we should be looking at.
++ >> CARLOS DUARTE: Okay. Thank you for starting that discussion. One thing that, I guess, was mentioned by all of you in different ways is the role of the end user, and in fact, I guess both users were mentioned: the one that is viewing or requiring the image or the description of the image, but also the one that's creating or sharing the image. For that one, there is the responsibility of generating a description, and, of course, we know most people don't do that, so that's why we also need these AI-based systems to take on that role. But this leads me to another aspect: if we have an AI-based system that's capable of assisting both the content creator and the consumer, how does this impact the agency of end users? Will end users feel this is no longer their responsibility because there is a tool that can do this for them? Or, looking at this from the content producer perspective, if we see this tool as something that helps someone generate a description, would this producer just start relying on the output from the AI? And, thinking about what Jutta introduced earlier today, she mentioned an organizational monoculture; can we also think about a description monoculture, in which all descriptions would start conveying the same kind of information? What are your perspectives on the impact that this has on the agency of end users? I will start with you.
++ >> SHIVAM SINGH: Awesome. It is a bit of a question. Let's say we're talking about the quality of our output based on the user, right, the end user. The quality of this description depends on how end users consume it. For example, most models currently provide high-level and grammatically correct captions in English, but that would not be true for captions generated in the other native languages of users; there may not be enough of a dataset to train the model. Now, the premise of training restricts the diversity of generated captions, and the range of things the model can comprehend and then generate a caption for, which includes diverse text, like an email, a date, or correctly explaining graphs, which has been a big problem until now. Once translation with AI is employed, how well it becomes an input is […]; for example, you can have two different models, one precise and one general. The output of the general model can become an input for a specialized model, and then you can refine it. This is how we're achieving it now.
++ The other thing is that captions generated by AI consume very large amounts of data to curate content, and in many cases of live caption generation, AI should put earlier events or earlier inputs into context as well. This is true for conversational bots, but it can also be a talk where you have live caption generation. So you have to put some context there and then generate the captions. Now, we have mature engines like GPT-3, but this is more complex than simple image-to-text generation; the speed, and the handling of the peripherals, are very much necessary. We're looking forward to a better solution where the end users are really satisfied with what they're getting.
++ >> CARLOS DUARTE: Thank you. Michael, what about the perspective of end users, the agency of end users, from your point of view? I guess more from the Web Accessibility Initiative role: how can we guide technology creators to ensure that end users retain autonomy when creating this kind of content?
++ >> MICHAEL COOPER: Yeah. So, first I would, you know, look at the ways in which machine learning generated descriptions and captions increase user agency, and there are ways they decrease it as well. For instance, although we would prefer that authors provide these features, if they don't, providing them via machine learning will help the user access the page and give them the agency that they're looking for in the task. Descriptions don't have to be perfect to provide that agency. That said, it is frustrating when they're not good enough; they can often mislead users and, you know, cause them to not get what they're looking for, spend time, et cetera. That's a way this can be a risk for users, and, as you mentioned, there is likely to be a tendency for content developers to say machine descriptions are there, so we don't need to worry about it. You know, I think those are simply considerations that we have to pay attention to in our advocacy, in the education work in the field, and also in documenting the best practices for machine learning. For instance, W3C has a publication called Ethical Principles for Web Machine Learning that addresses accessibility considerations among others, and it is possible that the industry might want a documented set of ethical principles or a code of conduct that industry organizations sign on to, saying here is accessibility ethics in machine learning in addition to the other ethics that we're paying attention to. Those could be ways that we can support the growth of user agency in the end.
++ >> CARLOS DUARTE: Thank you for that perspective and for raising awareness of the information that the WAI group is making available. I think that's really important for everyone else to know. Amy, what's your take on this, on the impact that these tools can have on the agency of end users?
+>> AMY PAVEL: Yeah. So I might answer this briefly from the content creator side. Say you are out to make a description: how could we use AI to improve the quality of descriptions and the efficiency, rather than sacrificing one for the other? I have worked on tools a lot in this space, so I'll start with what hasn't worked in the past and then share some possibilities on things that work a little bit better. One thing that I worked on for quite a while has been creating user-generated descriptions of videos. Video descriptions currently appear mostly in highly produced TV and film, and they're quite difficult to produce yourself because they're sort of an art form; you have to fit the descriptions within the dialogue. They're really hard to make. So one thing we worked on is tools to make it easier for people to create video descriptions by using AI. What didn't work was automatically generating these descriptions: the descriptions were often uninteresting, and they didn't provide quite the depth that the original content creator had included in the visual information of the scene. If it is simple, a house, a tree, it may get it. If it was something domain-specific, or had something extra to it that you may want to share, it was completely missing. One thing we looked at was how to identify areas where people could add description, such as silences, or how to identify things that were not described in the narration; at that point, the narration of the video is talking about something completely unrelated to the visual content, so people may be missing out on that visual content.
++ Rather than trying to, like, automatically generate descriptions, I think one promising approach can be to identify + places where people could put in descriptions or if they write a description, identify parts of the image that that + description doesn't cover yet. I think that there is kind of some cool opportunities to use AI in unexpected ways to + help people create better descriptions. +
++ I'll briefly address the end user part. You know, if the user is lacking, so the person using the captions, the + descriptions, if they're lacking information that can decrease the ability to have agency and responding to that + information, right? If you give them all of the information, you know, in one, big, piece of Alt text, you may not + give people much agency over what they're hearing or probably not matching with the cognitive accessibility + guidelines that Michael had mentioned. +
++ I have experimented with some ways to try to, like, maybe help people get agency over their descriptions, one thing + we have played with a little bit, it is, you know, asking basically alerting people to the fact that there is a + mismatch between the audio and visuals, for instance, in listening to a lecture, hey, the lecturer hasn't talked + about this piece of text that's on the slide. Would you like to hear more about it? Then people can optionally hear + a little bit more about it. That's something like OCR, automatically detecting text works quite well. There are + these opportunities that you don't want to overwhelm people with information when they're doing a task that's not + related, but there are some cool opportunities, I think, to give people control over when they get more information. + Yeah. +
++ >> CARLOS DUARTE: Thank you, Amy. + Before moving to the next question I have here, there is a follow up question on this by Matt Campbell on what you + just mentioned, Michael. You mentioned descriptions not being good enough are a risk for user agency, what Matt is + inquiring is how much can this be mitigated by just tagging the descriptions as automatically generated. Can you + give a perspective on this, also, Amy, if you want to following Michael? +
++ >> MICHAEL COOPER: Yeah. I'll try to give a quick answer. So the ARIA technology, Accessible Rich Internet Applications, enhances HTML with the ability to point to a description elsewhere in the HTML document rather than providing a simple alt text, and that gives you the rich capability, and we have that now. In terms of identifying that it is a machine generated description, we don't have a semantic for that, but that's the sort of thing that would get added to ARIA if the use case were emerging.
++ >> AMY PAVEL: Yeah. I will also, I'm happy to also answer this question, maybe I was looking at Matt's other + question, kind of related, I think. Are there other alternatives that are richer than alt text alone? One thing + we've looked at a little bit for, I worked a little bit on the accessibility of complex scientific images. What you + end up with, it is complex multipart diagrams that if you try to describe in one single, you know, alt text field it + performs quite badly. We're kind of starting to see, like, could we automatically break that big piece of alt text + down to a hierarchy to match the image so that maybe people can more flexibly explore like they would basically an + HTML version that sort of captures the structure of the image that people could explore. + Kind of thinking about other ways to present all of the information that currently gets relegated sometimes to a + single alt text into something that's a little bit more rich. +
++ >> SHIVAM SINGH: Carlos, you're on mute. +
++ >> CARLOS DUARTE: Thanks. + What I was saying, since we have been coming always around to the topic of or to the concept of quality, also one + question by Mark, Mark Urban, I think, it would be interesting to know what's your take on this. So is there a + documented metric that measures the quality of an image description, and if there is, what would be the most + important priorities for the defining quality. + Amy, you want to go first? +
++ >> AMY PAVEL: This is a hard question for me. I think that the answer is no. It is really a good, it is a really good question and something that we constantly sort of battle with. So, in our work we have kind of used a four point description scale: literally nothing; there is something in the description field but it is in no way related; there is something related to the image but it is missing some key points; and this covers most of the key points in the image. We kind of have been using this, and what the values mean depends a lot on the domain and what task the person is using the image for. But it's been like... you know, we've used this in a couple of papers and it's just been like a way for us to, you know, make progress on this problem. And we have also tried, for each domain we're working in, to inform it based on existing guidelines, as well as literally the existing W3C guidelines and what users have told us specific to that domain. I don't know of a good one. That's something that we just sort of worked around. It would be great to have more efforts on that in the future.
++ >> CARLOS DUARTE: Definitely something that's more qualitative than quantitative, definitely. What you just + described is a good way to start. So, Shivam, your take on the quality of image description? +
++ >> SHIVAM SINGH: Sure. So I guess when we come to industry set up, we have certain evaluation tools, we evaluate our + models as well as the outputs, there's a rigorous testing that goes on, but there is no set of metrics that we have, + but certainly we have some rules, we have W3C guidelines, we have some other guidelines as well that are in place. + They are not set rules, but, yeah, we have tools as a yardstick and we can build that test based on that only. There + can be some work done with that, yeah, certainly this is what we have currently. +
++ >> CARLOS DUARTE: Okay. Michael, Amy just mentioned, answered looking also at the definitions that W3C provide, do + you want to add something + on how can we measure quality of image descriptions? +
++ >> MICHAEL COOPER: The only thing I would really add to what she said is, so, we produce resources like Understanding WCAG, Understanding the Web Content Accessibility Guidelines, which goes into, when you're writing the image descriptions, what are the considerations, how do you make a good one. A big challenge for machine learning, I think, in particular, is that the appropriate description for an image would depend very much on the context. We described several different contexts in the support materials and, yeah, the right description for one is the wrong one for another. Sorting that out, I think, is one of the big challenges beyond what others have said.
++ >> CARLOS DUARTE: Yeah. Definitely. I have to agree with you. Apparently we're losing Shivam intermittently and he's + back! +
++ I'm going to combine two questions that we have here in the Q&A, and the one from Jan Benjamin and the other from + Wilco Fiers. It is more about qualifying images than really generating descriptions for the image. Jan asked can AI + differentiate between, for example, functional and decorative images rather than just generating a description, just + differentiating between an image that needs a description and one that doesn't? And Wilco asks if it is viable to + spot images where automated captions will likely be insufficient, so that content authors can focus on those and + leave the AI to caption, to describe others that might be easier for them. Amy, want to go first? +
++ >> AMY PAVEL: Sure. Yeah. I love both of these questions. I would say to Jan's question, I don't think, you know, + when the question is can AI do this, you know, we have tried this a little bit for slide presentations. The answer + is yes to some extent. It will fail some places. + To give you an idea of how, you know, AI could help may help detect decorative from non decorative, from more + informative images, like in the context of a slide presentation, it is informative images might be more complex, + they might be more related to the content on the rest of the slide and in the narration. Informative, they might be + larger on the screen and decorative on the slides might be, you know, like little decorations on the sides, they may + be logos, or like emojis, or less related to the content on the screen. What we have found out, we can do a decent + job at this, but it will fail in some cases always. Like maybe an image is included, but there is no other + information about it and it is tricky. In doing this, you want to be overly inclusive of the images you identified + as informative so that maybe you could help content authors make sure that they at least review most of the images. + +
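+ A rough illustration of the kind of heuristic separation of decorative from informative slide images described above; the features, weights and threshold below are assumptions made for this sketch, not a published model, and the scoring is deliberately over-inclusive toward "informative" so that authors review more images.
+
+ from dataclasses import dataclass
+
+ @dataclass
+ class SlideImage:
+     relative_area: float      # fraction of the slide the image covers (0..1)
+     text_overlap: float       # similarity between image content and slide text/narration (0..1)
+     is_logo_or_emoji: bool    # detected logo, icon or emoji
+
+ def looks_informative(img: SlideImage) -> bool:
+     """Heuristic guess; errs on the side of 'informative' on purpose."""
+     if img.is_logo_or_emoji:
+         return False
+     score = 0.6 * img.relative_area + 0.4 * img.text_overlap
+     return score >= 0.15  # low, over-inclusive threshold (assumed value)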
++ I would say to Wilco yeah, that's a great idea. We have tried it a little bit on Twitter. One time we ran basically + a bunch of different AI methods to try to describe images on Twitter, and so for each image we try to run captioning + OCR, we did this URL tracing to see if we could find a caption elsewhere on the web and basically if all of those + had low confidence, or they didn't return anything, then we kind of automatically sent the image to get more human + written descriptions. Another thing we explored, users optionally, retrieving the description. It is possible. The + subtleties that are there, they're difficult to view automatically. At least that was a way, given how many images + were on Twitter without descriptions, it was sort of a way to filter out the ones we definitely need to get more + information from a human. Yeah. +
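+ A minimal sketch of that kind of triage, assuming hypothetical automated describers (captioning, OCR, URL tracing) that each return a text and a confidence; images where no method is confident enough are routed to a human describer. The confidence threshold is an assumed value, not one from any specific system.
+
+ from typing import Callable, List, Optional, Tuple
+
+ # Each automated method returns (text, confidence) or None if it found nothing.
+ AutoDescriber = Callable[[str], Optional[Tuple[str, float]]]
+
+ CONFIDENCE_THRESHOLD = 0.6  # assumed cut-off, tuned per deployment
+
+ def triage(image_path: str, methods: List[AutoDescriber]) -> Tuple[str, str]:
+     """Return ("auto", text) if some method is confident enough,
+     otherwise ("human", "") to flag the image for a human-written description."""
+     best_text, best_conf = "", 0.0
+     for method in methods:
+         result = method(image_path)
+         if result is None:
+             continue
+         text, conf = result
+         if conf > best_conf:
+             best_text, best_conf = text, conf
+     if best_conf >= CONFIDENCE_THRESHOLD and best_text:
+         return "auto", best_text
+     return "human", ""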
++ >> CARLOS DUARTE: Great. Thank you for sharing those experiences. Shivam? +
++ >> SHIVAM SINGH: I guess I have been in contact with this scenario, where I had to get descriptions of images that most likely will not get a sufficient machine description. So there are ways, tools that can do that for you; on websites, there are multiple plug-ins to use. You can give certain descriptions and people can put certain human descriptions over there. To mark them, to spot them in a scalable manner, it sometimes does not become scalable. That's the whole issue. You can have a tool, but it may not be scalable for every user out there, every website out there. This can be done, but, yeah, again, there are instances where it can be used and where it can't. The technology, that's the answer; how to scale it, that's the question.
++ >> CARLOS DUARTE: Great. Thank you. + Michael, do you have any input on this? + +
++ >> MICHAEL COOPER: No. Not on this one. +
++ >> CARLOS DUARTE: Okay. + That takes me back to one question that I had here, taking this opportunity to go back there. + I will start with you, Michael. + It's going in a different direction than what we have been going so far. + How do you think that we need to deal with legal copyright and responsibility issues when generating descriptions + with AI based models? How do we tackle that? + +
++ >> MICHAEL COOPER: Yeah. Okay. You know, also, you know, not speaking as a legal professional, but issues that I know about: in general, at least for accessibility, there is often a fair use right to transform content, but I'll circle back to that; that's the first question, but then there are issues around accuracy. If a machine has generated a caption or description, you know, how accurate is that description, who knows how accurate it is, and also publishing it, especially with potential inaccuracies, can bring on liability consequences even if the use is otherwise allowing that publication.
++ Another, you know, challenge, it is meeting requirements. + If accuracy is high, pretty high, but still not quite right, if it is a legal document, it may not be sufficient, so + depending on the accuracy of these kind of descriptions is going to be a vague, you know, legal challenge for a + bunch of different directions. Of course, you know, there is the benefit, the reason to do it, this still can be + better than nothing for many users, you know, who get used to some of the inaccuracies and it does provide + scalability given how image and video focused our web has become. I would highlight one of the ethical principles + from the ethical machine learning document is that it should be clear that the content is machine generated allowing + many actors to evaluate it. + +
++ Circling back to fair use, I think who is doing the generating, or publishing machine learning content will probably + impact that if it is a user agent and assistive technology, it is probably covered by fair use. If the content + producer is doing, they're probably declaring fair use for themselves but the responsibility for accuracy, it will + be higher for them because they're now the publisher. There are third party agents of various sorts, accessibility, + remediation tools, other sorts, where I assume it is a legal wild west. +
++ >> CARLOS DUARTE: Definitely. To make it worse, I guess, there are many wild wests because every country, every + region might have different legal constraints there. Shivam, any take on this? +
++ >> SHIVAM SINGH: Yeah. So I have a holistic view of how technical this has been. This is an ongoing issue in a lot of countries now. You see, almost all publicly available datasets, right, these are data that are associated in some form or other with copyright. There is no framework, in most parts of the world, that deals with the legality of the generated captions; there is no written law in place anywhere, or it might be coming later, maybe in the U.S. first. These are some complexities. The ownership of the data: if it is machine generated data, who will be owning that data? The industry that built that model, or those behind the dataset that has been gathered from different data sources? This is a very complex challenge.
++ The other part of it, how would you fix the responsibility? To keep that in mind, it depends on the end user of the model. When you use it, in what context are you using it? For example, some of the models that are used in academia, these are just for research and development purposes; there is no way where you can fix the responsibility on the academic work. This is helping in two ways. This is how you source the data: either you have to get context on the data, where it is coming from, or you gather the data based on written sources, you have a mutual understanding between the data creator and you, and then you train on the data. That gives you a complexity where you have a small dataset and there is a large input going into the training of the data. These are the complexities currently and, yeah, it all depends on where the model or output is being used. That's where the fair use policy comes in.
++ >> CARLOS DUARTE: + Context all the way in all scenarios, right? + Amy. + +
++ >> AMY PAVEL: I'm not as familiar with the legal and copyright side of this. I think, you know, oftentimes I do think about the responsibility aspects of the captions that we're generating, especially when we're generating descriptions of things like user generated media. This more goes back to the potential harms brought up in the keynote. For instance, I think one thing I'm often thinking about is, when are errors not that big of a deal and when are they a bigger deal? Kind of looking at the risks and trade-offs in terms of who is getting identified by the tool and who is receiving the image. For instance, if you misidentified my shirt as dark blue rather than black, this error is unlikely to be as harmful to me, but some people might experience an image classification misgendering them as harmful. Two ways of dealing with this, you know, not to say that either of them is good right now: one, a lot of tools back off to saying person, rather than saying woman or man. Another way that you could imagine doing it is describing physical characteristics of the person that are less subjective, and a final way you may imagine doing it is considering people's own identifications of how they would like to be described. Sometimes that varies in different contexts. That itself is a hard problem. Yeah. I don't have much to say on the legal, copyright side; I wanted to bring up that that's something that's come up in my work before.
++ >> CARLOS DUARTE: Thank you so much. + We're almost at the end. We have less than 10 minutes, and questions keep coming. That's great. You will have the + opportunity, I guess, to guess try to answer some of them offline if you wish to. I'll still take another one. The + last one we have here, it is Antonio Gambabari. The question is how do you envision the challenges of explainable AI + initiatives in the context of image recognition. This relates to several of the aspects that we have dealt with, + with the uncertainty of images and how to convey that to users, just by labeling something as automatically + generated, would it be a way to convey that. Do you think that explainable AI initiatives have the potential to + improve this kind of augmented context for the user and where did the description came from. + This time, I'll start with you. +
++ >> SHIVAM SINGH: I think, yes. It is a good point. Explainable AI initiative, it deals with how metadata can help + the end user to know the context of what's being generated, any quantitative score on any of the models , it is + supported by a lot of data going beyond your training. Right. + There is a restriction though, whatever things you're getting an output, the metadata can, there are multiple layers + of training if you look at the training. There are multiple layers of training. How it is made by AI, it gives a + different level of metadata but not all. It could augment the user but that won't be the complete solution. That's + how I see it. + +
++ >> CARLOS DUARTE: Amy, any thoughts on this. +
++ >> AMY PAVEL: Yeah. + That's a good question. I don't know. + I think some things, one thing I would think about a little bit in this, and I have had to think about before, it is + sort of, like, the trade off between receiving information efficiently and explaining where you got all of that + information from. + I think both are important. I think maybe, like what my experience has been, users are used to certain different + types of errors and can recover from them quickly. + For instance, like when a user is reviewing their own content, for example, they took picture, video, and they hear + something described as a leash. I have had the experience of users being like, Oh no, that's my cane, it always + calls my cane a leash. In some cases, people can get used to identifying the errors for the known unknowns. This is + just a wrong identification, I'm used to it. I think it is harder to recover from errors that are unknowns, + unknowns. There are no other contexts about it, you don't know what else it could be. Maybe in the cases where the + users haven't identified it before that confidence, that information is extra important and so, yeah, not really + sure what the answer is. + I think that considering the balance between what is important and to know more information about this, will be a + tricky design question, a question for how to develop technology. + +
++ >> CARLOS DUARTE: Great. Thank you. + Michael, any input on this one? + +
++ >> MICHAEL COOPER: No. I would just add to all that, you know, this again falls into the question of ethics, + transparency and explainability, it is one of the section of the machine learning ethics and addresses several + aspects of knowing how the machine learning was built, it should be auditable for various issues. These ethics are + probably less specific to some of the use cases that we're discussing in the symposium so there may be room for + adding to this section of the document. +
++ >> CARLOS DUARTE: Yeah. Yeah. I think that may be a good idea. + I'll take just a final one, going back to the topic, one from Matt, it is something that we have touched upon + before. I'll mention you this, Michael, we have mentioned this already in the scope of ARIA, the question is about + having richer alternatives for the image description, to the standard alt text, which is usually short. What are the + thoughts on the usefulness of having richer descriptions for image alternatives. + +
++ >> MICHAEL COOPER: As far as the general idea, in terms of the usefulness of making use of richer descriptions: for very simple images, as for the way that the web started, images were largely providing small functional roles, and short alternatives were sufficient for many cases. Images are used nowadays for a variety of purposes, and for some, reducing them to an alt like "a photo of my dog" is not really providing the experience. You know, there is definitely a need for richer alternatives and longer alternatives, ones with structures to skim them, depending on the context, ones where you can provide links to the necessary bits of alternative data. On the question about images of charts, often the description for a chart is much more structured semantically than for other kinds of images, and you want to be able to take advantage of rich text markup. I believe that assistive technologies are supporting, you know, rich text descriptions whenever available; it is a question of getting people to use them more. For machine learning generated descriptions, I would rather do richer rather than less rich output.
++ >> CARLOS DUARTE: Yeah. Following up on that, for Shivam and for Amy, by having richer and longer descriptions, are we increasing the chances that AI generated descriptions will mess up, or isn't that a risk? Who wants to start? Amy?
++ >> AMY PAVEL: Sure. I definitely agree, oftentimes the more detailed that you get, the more opportunities there are for errors. A way we have kind of explored this a little bit is, especially for very informative images that maybe a lot of people will see, we thought of how to combine automated tools with human written descriptions to hopefully make some of the descriptions better; maybe automated tools could help automatically extract the structure of the image, and humans go in to write more detail about the parts of the image that are really unlikely to be fully described by the computer. For now, the way I think about the complex images is often: how are we going to help humans create descriptions more efficiently while still maintaining high quality, rather than thinking of how to do it fully automatically, based on the images I have looked at in the past. Yeah.
++ >> CARLOS DUARTE: Thank you. Shivam, any input? +
++ >> SHIVAM SINGH: I think the inspiration behind this question would be to give structure to the output. So a structured output makes more sense than to have a fallback estimate. You can provide more information in the output, but the output should be shorter and more explainable, and it may be grammatically incorrect; what could make more sense to the end user is to have another option to expand that. It's not like you just have a string generated out of an image, right? When it is read out by a screen reader, it should be read concisely, short and brief. And for more description, there should be some other data that can be supplied to it. And then there are multiple ways we can do this. But the description in an alt text should remain concise and grammatically correct, so that screen readers can read it. That's how I see it.
++ >> CARLOS DUARTE: Okay. Thank you so much. And I want to thank the three of you once more for agreeing to take part + in this panel, also for agreeing to take part in the next panel. So as we can see, media accessibility, it's really + a rich topic and definitely computer generated descriptions are also linked with natural language processing. So + what that will be the topic for the next panel in just under 10 minutes. So we'll have a coffee break now and I hope + everyone's enjoying, and we'll be back at ten past the hour. +
++ >> CARLOS DUARTE: Welcome to the second panel of the first day. This panel will aim to discuss the current status of natural language processing techniques and, here in the context of the web, we know that they can be used to generate textual descriptions for images and also for other visual media presented on webpages. We'll focus our discussion today, or start to consider, aspects such as providing understandable text to better meet web user needs and the different contexts of use, and also what the future perspectives are for natural language processing to support web accessibility. I'm glad to welcome back Michael, Shivam and Amy, there you are, Amy! Also to welcome Shaomei Wu from AImpower.org who agreed to join us on the second panel of the day. I welcome you all back; welcome, Shaomei. For the first intervention, Shaomei, I ask you to briefly introduce yourself; your three co-panelists have already done that in the previous panel, so no need for them to reintroduce themselves.
++ I will start by thinking about once again the quality, we go back to the quality topic and now the quality of + machine generated descriptions and now no longer from the perspective of image processing but from the perspective + of the natural language generation. How do we improve the quality of the machine generated descriptions, especially + taking into account the personalized preferences from users. + I will start with you, Shaomei. +
++ >> SHAOMEI WU: Thank you all for having me here today. My name is Shaomei Wu, and right now I'm the founder and CEO of AImpower.org, a non-profit that researches and co-creates empowering technology for marginalized users. And first of all, I want to also share that I do have a stutter, so you may hear that there'll be more pauses when I talk. Before AImpower.org, I was a research scientist at Facebook leading a lot of research and product work on accessibility, inclusion and equity. One of the products that I shipped was automatic alt text, a feature that provides short, machine generated descriptions of images on Facebook and Instagram to screen reader users in real time.
++ When it comes to the quality of automatic alt text and other similar systems, we saw two big areas of development that we wanted to pursue. The first one is accuracy, which I think we talked a lot about in the last panel as well, and I want to talk a bit more about the second one, which is the richness of the descriptions. To be honest, what we generated, the alt text, was quite limited, and a lot of users say it is more of a teaser: oh, yeah, people smiling, pizza, indoor, but no more than that. What kind of environment is it? Is it a home? Is it a restaurant? So I think our users really wanted to get all the richness that someone who has eyesight can see and access.
++ One particular kind of area that users want to know more about, it is about people: who they are, what they look like, race, gender, even how attractive they are, because that is something that's socially salient. That was a kind of a big challenge for us when we were designing our system, because, like, how can we share those kinds of attributes in the most accurate and kind of socially conscious way? We actually chose not to show the race and the gender of the people being photographed, which we got a lot of complaints on. But how to kind of look at this in a socially respectful way is something, I think, we should really work on, and now I can see a few ways that we can make that better. For example, considering the relationship between the people in the photo and the viewers; for example, like, if they're friends, then we can put in the name, you know, other things about those people. And another thing, it is kind of to give progressive details, so to have some kind of an option to kind of allow the consumer to request more details that we cannot just provide up front by our systems. I will be done here and allow other panelists to talk.
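+ A rough sketch of the two ideas mentioned here, using the viewer's relationship to the person in the photo and letting the consumer request progressively more detail; the data model and the example wording are assumptions made purely for illustration.
+
+ from typing import List, Optional
+
+ def describe_person(name: Optional[str], appearance: str, is_friend_of_viewer: bool) -> str:
+     """Prefer the person's name when the viewer knows them; otherwise fall back to
+     appearance-based wording rather than guessing identity attributes."""
+     if is_friend_of_viewer and name:
+         return name
+     return f"a person with {appearance}"
+
+ def progressive_description(levels: List[str], requested_level: int) -> str:
+     """levels[0] is the short teaser; later entries add detail on request."""
+     requested_level = max(0, min(requested_level, len(levels) - 1))
+     return " ".join(levels[: requested_level + 1])
+
+ # Example: first a teaser, then richer detail only if the user asks for it.
+ levels = ["Two people smiling indoors.",
+           "They are sitting at a table with a pizza.",
+           "The room looks like a restaurant with warm lighting."]
+ print(progressive_description(levels, 0))
+ print(progressive_description(levels, 2))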
++ >> CARLOS DUARTE: Thank you, Shaomei Wu. + Shivam, your thoughts on how can we improve the quality of machine generated descriptions? +
++ >> SHIVAM SINGH: This is a two part thing. When you come to technically implementing models, how you have designed the model, how you have trained it, and who the stakeholders of designing a particular model are, it is very much necessary to consider how they're going to get a quality machine generated description. When we take into account users' personalized preferences, this is two parts. Let's first take an example. I am a person who knows Spanish, right, and my model, a very famous model, gives descriptions in English. So now, whatever the consumption of the model is, let's say you use an API to consume the model, that should take into account the personalized preferences of the users, such as the language, and write the output based on that as well. This diversity of the model, to prepare output in multiple formats and languages, is something that can be looked into; this is how the quality of machine generated descriptions increases. Now, you need not train the complete model separately. What you can do is to create post-processing scripts for the models and that can help end users. There is not much of an effort there compared to model training; it is a simple solution to what the problem can be.
++ The other thing is, how you prepare the quality data. You should fully and carefully categorize it, the structure of + the data, if needed, and let's say you have input data that are blurred images and all sorts of this.. You have to + carefully prepare the model and train the data, and based on that, the description would be a bit more clear and the + personalization would also be affected when you look into how you can post process the data for certain groups of + people. + That's how I see it. +
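+ A minimal sketch of the post-processing idea mentioned a moment ago: the captioning model is left untouched and its English output is passed through a translation step selected from the user's stated preference. Here caption_image and translate_text are placeholders for whatever services are actually available, not specific products.
+
+ from typing import Callable
+
+ def personalized_caption(image_path: str,
+                          caption_image: Callable[[str], str],
+                          translate_text: Callable[[str, str], str],
+                          preferred_language: str = "es") -> str:
+     """Post-process a model's English caption into the user's preferred language
+     instead of retraining the captioning model itself."""
+     english_caption = caption_image(image_path)   # e.g. "A dog running on a beach"
+     if preferred_language.lower().startswith("en"):
+         return english_caption
+     return translate_text(english_caption, preferred_language)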
++ >> CARLOS DUARTE: Thank you. + Amy, want to share your experiences. +
++ >> AMY PAVEL: Sure. + A couple of ways that I have seen that are sort of promising to use NLP to improve quality, one thing I have seen + recently, people starting to consider context around the image that's going to be described to maybe create a + description that's more helpful. Imagine someone writes a post on Twitter, and they have coupled that post with an + image. Considering the post and the image together, maybe it may inform models on how to create something that's + more informative. For instance, if I posted a picture of myself snowboarding, I said I learned a new trick, then it + may be important to tell me what trick you learned. Whereas on the other hand, I said I just went on vacation, you + know, the exact trick may not matter as much. I think that the idea of, like, using language understanding to get + more information about the context before making a prediction is promising. +
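+ One very small sketch of folding that surrounding context into the request sent to a description model; the prompt wording and the generic request interface are assumptions, not any particular system.
+
+ def description_request(post_text: str,
+                         base_prompt: str = "Describe this image for a screen reader user.") -> str:
+     """Fold the text the author posted alongside the image into the request,
+     so the model can prioritize details the post makes relevant (e.g. a named trick)."""
+     post_text = post_text.strip()
+     if not post_text:
+         return base_prompt
+     return (f"{base_prompt} The image was posted with the text: \"{post_text}\". "
+             "Mention details that this text makes relevant.")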
++ Another way I have sort of seen it used to maybe improve the quality, it goes back to the other answers that were + given. Maybe you can use question answering about the image to gain more information when you need it. One thing I + have also thought about, it is seeing if maybe users could give examples of their preferences about descriptions in + natural language. This is an example of a description, maybe we can copy the style of this description when we're + applying it to other descriptions. Maybe I like to hear about the costumes someone wears in a video and I wish that + future descriptions could include more information about that rather than summarizing them. +
++ Finally, one other way, I have used NLP to improve quality, it is also based on summarization. So there can be times + when there is more to describe than time to describe it, especially videos, there is often a really small amount of + time to describe without overlapping the other audio. One way you can use NLP to improve that quality, it is by + trying to summarize the descriptions so that they fit within the time that you have. They don't decrease the + experience of people trying to watch the video and hear the audio at the same time. Yeah. +
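+ A sketch of that timing constraint: the silent gap is converted into a word budget at an assumed text-to-speech rate, and a summarization step (left as a placeholder) is only invoked when the description would not otherwise fit. The 160 words-per-minute figure is an assumption.
+
+ import math
+ from typing import Callable
+
+ WORDS_PER_MINUTE = 160  # assumed comfortable TTS rate
+
+ def word_budget(gap_seconds: float) -> int:
+     """How many words fit in a silent gap at the assumed speech rate."""
+     return max(0, math.floor(gap_seconds * WORDS_PER_MINUTE / 60))
+
+ def fit_description(description: str, gap_seconds: float,
+                     summarize: Callable[[str, int], str]) -> str:
+     """Shorten a description only when it would overlap the video's own audio."""
+     budget = word_budget(gap_seconds)
+     if len(description.split()) <= budget:
+         return description
+     return summarize(description, budget)  # summarize() is a placeholder NLP step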
++ >> CARLOS DUARTE: Yeah. + That's a good use for NLP. + Michael, still on this topic, I would like to have your perspective on initiatives on WAI that may assist users in + providing their preferences, so that eventually models can use those for anything that may be ongoing in that + regard. +
++ >> MICHAEL COOPER: Yeah. + First of all, to repeat this, for anyone doing this session that I'm not a machine learning professional, I'm + speaking from my perspective of the work on the Web Accessibility Initiative. + I want to talk briefly, other panelists covered almost anything that I would have said. One thing that, based on my + knowledge of how machine learning works generally today, and models tend to be focused on, you know, a particular + ability and it is not universal and in the future AI models will have more abilities combined and so there may be + more than one model recognizing this is a human, here are the attributes, another one saying that is this human, and + another one that can say this human plus that human equals this relationship. All of that information I believe is + separate right now. The ability for models to share context, it is going to be the part of the solution that we + need. +
++ What I can speak of from the Web Accessibility Initiative, we are only beginning to explore what AI and + accessibility means and this symposium is part of that process. + We have a practice of doing research papers, sort of literature reviews and then proposing accessibility user + requirements. That would be something that we could be working on to start gathering this information and from there + we decide what to do with it, does it go in guidelines, new technologies, whatever. I think most of the resources + around AI would fit into new resources for those categories. +
++ >> CARLOS DUARTE: Thanks. I would like to now move on to addressing something that was at the core of the keynote, discriminatory bias, or any other sort of bias. Here I am looking at something that was entered in the Q&A for the previous panel, but I think it fits well into this topic, and it brought up the use of large language models (LLMs), which are currently getting a lot of traction and a lot of spotlight. Do you think these LLMs can open up new avenues, as Antonio Gambabari mentioned, for reducing the different types of bias that we see as a result of the use of AI training models? Shivam, you want to go first this time?
++ >> SHIVAM SINGH: Yeah. Sure. Sure. + This is quite a question, which has been close to my heart as well, how to address social bias in Large Language + Models. Right. We have seen a lot of trainings on this, so socializing this, this is also data that they have been + trained on, how the social attitude is of the data presented within that model. Most of the available data, it is + used to train models and we use old ones, containing a certain degree of bias, most of the data generated on the + Internet, it is basically those, people who can consume it, it is not everybody, some don't even know what the + Internet is, they cannot create data over there. + Most of the data available to train the model is based out of that. That's how you see the bias in one way. + +
++ The other instance I can give an example, you will see a lot of violence, homelessness, other things, all of those + things are over represented in the text and that's, these are both not similar, but you will find these kinds of + representations in the LLM outputs. + How to address this, there is another way of human in the loop feedback on existing models where you provide some + feedback to the already existing model, that this is the sort of output that's not correct, this can be a correct + version, this can be another version. + Some human interface is needed with that. Now, this aspect of the data, it is a representation of the models now and + the underlying data of model, the main source of the issue here. You need to correctly source the data and correctly + structure the data so that you're not over representing one section of data, for example, let's say that you have a + bigger society and the society can be underprivileged, over privileged, maybe other sections of the society. You + cannot just take the data from one section of society and train that model and say this is a picture of this + particular area. There are many areas underrepresented, that's happening with all of the models at the start of LLM + you can see. + +
++ Now, what we can also do to mitigate this: you can create an inclusive workflow to develop the models and the designing of that model, and you give inclusive workflow training to the people involved, you get them aware of what's happening and how to mitigate this. All of the persons who are included in the generation, there is a lot going on, a lot of data extraction goes on; all of these people can be trained for inclusiveness. There are multiple tools that help us to do that. Like, if you're creating a model, you can test that; Google, for instance, has tools to check how the models are performing when you talk about inclusive outputs of the data. Also, you need to do thorough testing of the models to ensure that all of the outputs are properly aligned and properly represented; all of the sections for which the model is intended to be used should be represented well. The testing should be there in the case of any models that you're creating. Now we have noted that we're at the stage where AI and LLMs are quite mature right now and we're seeing a lot of data technologies, and if we can do this going forward, I guess this can be a solution.
++ >> CARLOS DUARTE: Thank you, Shivam. Shaomei, can I have your input on how can we address social bias or other types + of bias? +
++ >> SHAOMEI WU: Yeah. So on this, I want to kind of go back to what I had talked about before, in particular on the sensitive social identities of people in the photos. I don't see a good way for the current machine learning systems to accurately come out with those labels. The key kind of issue here is that a lot of those systems really assume these, like, fixed social categorizations such as race and gender. I think maybe we should think beyond the machine learning systems and kind of find a way to attribute people respectfully through, you know, giving agency to those being photographed and being described. For example, I think now a lot of people have been kind of identifying their pronouns in their social media bios; all of this information should be made use of when deciding the way that we're describing the gender of somebody in the photo.
++ The other direction that we have been exploring is sort of describing appearances instead of identities. For example, kind of describing skin tones or hairstyle and outfit instead of assigning a kind of race or gender label to somebody. I don't think any of those solutions can really address the real cause of the problem, so I don't really have a very good answer on this. I think maybe the alternative is to kind of think of a way to convey and share who we are without relying so much on images like we are today. You know, how can we convey the information that we want to share online in a not so visual-centric way? I think that's kind of a bigger question.
++ >> CARLOS DUARTE: Thank you, Shaomei Wu. Amy, next, to you +
++ >> AMY PAVEL: I think that the prior answers mostly covered the things I was going to mention. I loved Shaomei Wu's answer about describing ourselves in ways, or like figuring out ways, that don't rely on the visual information and giving agency to people to add their own identities that they want to be shared. I will say that I think that depends on the context; you may want to share different parts of your identity if it is important to you, and, I think, even things that give end users agency may have a lot of subtlety in how they would be applied in different cases. I like the idea of describing, you know, aspects of appearance. I think one challenge with that is that you might be sort of trading off between these aspects of appearance that you're describing and the efficiency with which someone can get them; like, maybe they're not going to get the information as quickly as a sighted person would perceiving that person, just because, you know, audio occurs over time. I think that it is an extremely difficult challenge and in some cases it can matter; like, I can imagine, you know, seeing a photograph of the leadership of a company, you may want to know some quick details about the demographics of who is leading it, for instance.
++ One thing I have noticed, it is sort of related to this. You know, when I'm asking, so I sometimes, you know, have people describe videos, there can be a lot of differences in which aspects they describe; even if they're going to describe the aspects of someone's appearance, the way they describe those based on who is in front of them can also differ based on biases people have. So if people see a woman, they may describe her differently than they would describe a man, they may focus on different aspects of appearance, so anything going towards describing aspects of appearance will have to be very carefully designed, and it feels like a challenging problem. Yeah.
++ >> CARLOS DUARTE: Thank you so much, Amy. + Michael, any thoughts on this and I would add something here, especially for you, it is that do you see any future + role of accessibility guidelines in contributing to preventing bias in machine learning generated descriptions or + whatever results in these models. + +
++ >> MICHAEL COOPER: I have an answer for that question, it could be longer than my prepared answers. Let's see where + we go. + I would like to add a couple of thoughts to what others have been saying, I want to first categorize bias, we're + talking so far of being labeled bias in recognition, is there biases of how machine learning recognizes objects, + people, et cetera, context, and in that, one thing that magnifies this challenge, in accessibility context, it is + that the sample size of People with Disabilities can be smaller in various training sets and there is a risk that, + you know, images of People with Disabilities, the training set, contexts that are important for them, wheelchair + ramps, something, it will be excluded as outliers or will be less well recognizable by the AI than, you know, the + images of other people. + +
++ You know, that's just another added dimension to the aspects that we need to look at. We also need to look at the bias in the application of this. You know, we have talked a few times during the session about the risk of relying on machine generated descriptions and captions as being good enough, whereas content that has a more mainstream audience may also have captions and descriptions that get more curation and what you would call quality assurance. That kind of bias could creep in, and that can magnify the impact of disability bias because it can cause people to be excluded from the fora from which people are recruited to be part of training sets, et cetera. So again, the ethical principles for web machine learning speak to that, and I think we may be identifying some content that we need to add to them.
++ Moving on to what WAI can do about that, you know, I do believe it is within the scope of the Web Accessibility + Initiative, or the W3C to provide guidance in some form about how AI and accessibility should work together + addressing many of these things. + You know, typically, this sort of thing would be a Working Group note which means it is a formal document published + by the W3C that has a certain level of review. There is even opportunities for versions that have had more review + and sign off. I think that's one thing we may need to do. + +
++ I will talk briefly about the work that we're doing on the Web Content Accessibility Guidelines 3.0, sorry, the W3C Accessibility Guidelines or WCAG3. It is a substantial re-envisioning, and it has been clear since the beginning that we want to address equity in the guidelines, how to make sure they're equitable for People with Disabilities. We have explored that in certain ways in the Working Group, really unpacking that, understanding the relationship between equity, accessibility, bias and other dimensions. We're connecting that with other work W3C has been doing to make itself a more equitable organization, and, you know, this is to say that I believe that WCAG3 will have some structure built in and support resources addressing the issues of bias specifically. These are hopes, not promises, but, you know, that's the direction from activities like this.
++ >> CARLOS DUARTE: Thank you so much. + Those are exciting avenues we hope will come to fruition in the near future. + I guess final question for everyone, and it is I would like to know a bit about your future perspectives on the use + of Natural Language Processing for the field or in the field of accessibility. + I will start with you this time, Amy. + +
++ >> AMY PAVEL: I think this is an exciting area. + One thing, one shift I have found recently among people in NLP I talk to, as models are getting better at creating + fluent text that looks reasonable that a lot of people are becoming more interested in what are the actual + applications of this and how can we build tools that support these applications, rather than relying on automated + metrics and that may not capture people's experiences. + I wanted to note that that's a direction that I find exciting. + A couple of things that could be promising and I mentioned them in other response, you know, as we gain the ability + to describe more and more about the image, I think that NLP can provide a really good opportunity to personalize + those descriptions based on the person and what they want as well as the context. + There is, if you think about walking in a room, there is so much you could possibly describe, if we could make it + easier for people getting the information they're looking for quickly from their media, that would be a great + improvement and, you know, combining computer vision to recognize things in the underlying image and using something + like NLP to summarize that description I think is promising and exciting. + +
++ Another way I think I'm excited about it, it is opportunities to maybe help people with their own description tasks. + When we have humans working on descriptions, it is really hard. You know, novices sometimes have a hard time + remembering and applying the guidelines that exist. Maybe we could rewrite people's descriptions of videos to be + more in line with how an expert would write them by making them more concise or changing the grammar a bit so that + it fits what people are expecting from their guidelines, or we may alert people to aspects of their own descriptions + that may need, that could be changed a little bit to perhaps reduce something like bias that they have in a + description. + There is really lots of exciting opportunities in terms of authoring descriptions as well as making the end + descriptions a little bit better. + Yeah. + +
++ >> CARLOS DUARTE: Great, thanks a lot. Shivam. +
++ >> SHIVAM SINGH: I see a bit more of an opportunity now than earlier because now model engines are advanced. I see a good context-aware solution giving you faster processing of data, and it works on text, videos and audio. This could be a reality. A good use case I have been following is how to make academic textbooks and academic assignments accessible; they have multiple graphs and associated data, and if some models could create a better understanding of those things, it would help a lot of people who have difficulties understanding them, or who face an absence of good quality descriptions of these charts. I see this happening in the next few years. As a closing comment, I would say there are different sets of consumers of media, right? Some can read but not comprehend, some can comprehend easily but have difficulty consuming it visually. In that sense, the coming NLP technology would help designers with contextual descriptions as outputs, and, to say it in simple terms, if you give me a simple, efficient output that's familiar and aesthetic, it would be the pinnacle of what I see as NLP. This goes for natural language processing, understanding as well as generation technologies.
++ >> CARLOS DUARTE: Thank you. Exciting times ahead, definitely. + Michael, you want to share your vision? + +
++ >> MICHAEL COOPER: Based on my knowledge of how machine learning works at present, the tools tend to be more focused on specific abilities, which means that the context is isolated. I think I'm speaking more as a person working in the field, recognizing a need that may not yet be a technological potential, but in the Internet of Things APIs are used to exchange data between different types of devices, and if the tools can model some structure, share context with each other and negotiate a better group description, I think that may be an opportunity for early evolution of this field. Long term, of course, tools will emerge with a greater sense of context built in, but that will probably be another tier or something similar; that's my view on the near-term future based on my knowledge.
++ >> CARLOS DUARTE: Good suggestions to look at also. + Shaomei. + +
++ >> SHAOMEI WU: Yeah. Looking into the future, I can see kind of two areas that I think will have a lot of potential. The first one is from the technology perspective, where I agree with my colleagues: I see a lot of gain in kind of incorporating the context surrounding photos and in taking advantage of the recent progress in deep learning models that handle multimodal representation spaces. So we can embed both the image as well as the kind of text surrounding it, and then the metadata, the author, the time when the photo was taken or posted. A lot of those can be kind of joined and represented in a shared space that provides us a lot more than just the visual information alone. I think that's kind of a big technology breakthrough that we can see in the near-term future. The second thing, which I think is more important to me, is the use case perspective. I think right now when we think about or talk about media accessibility, we are mostly thinking about the consumption case: how do we help somebody that cannot see to kind of consume the photos that are posted by others, and mostly posted by sighted folks. I think equally important, but largely kind of overlooked, are the media creation use cases: how can we support people with visual impairments to create and to kind of share photos and videos?
++ In my own work on these use cases, I have seen why there is such a gap in what the current technology can do; for example, all of the modern AI models really failed when it came to processing photos taken by people with visual impairments, because they're just not the same kind of photos that are usually used and shared. You know, there is a huge gap between, like, the current fundamentals of those models and what they can do. Then second, there is a need to address a lot more personalized and aesthetic needs. I take ten selfies, and I want to find the one that I want to post to kind of share who I am, and that is something we cannot do now; you know, we can kind of tell you, okay, you have ten photos, and they all kind of contain your face, but then how can we have models that really kind of represent somebody's taste and somebody's kind of aesthetics? That's another interesting future development that I want to see. That's all.
++ >> CARLOS DUARTE: Thank you so much, Shaomei Wu. I think we only have 4 minutes more. I won't risk another question because we need to end at the top of the hour. I will take the opportunity to once again thank our panelists. I hope everyone enjoyed it as much as I did. It was really interesting, with very optimistic perspectives, so we can see that it is not just the more risky or risk-enabling outputs that AI can have. It is nice to have these perspectives. Thank you once again, Shivam Singh, Amy Pavel, Shaomei Wu, Michael Cooper, it was brilliant to have you here.
+Thanks, everyone who attended. We'll be back tomorrow starting at the same time, 3:00 p.m. Central European time. I + thank you to those attending especially on the West Coast of the U.S. where it is really early and also India, I + guess it is the other way around, right, where it is really late, Shivam, thank you all for joining. + So as I was saying, tomorrow we'll start at the same time, we'll have another two panels, first panel on machine + learning for web accessibility evaluation and in the second panel we will come back to the topic of natural language + processing but now focusing on accessible communication and we'll close with what I'm sure will be another really + interesting keynote, from Shari, and looking forward to a similar discussion between Shari and Jutta Treviranus at + the end of the keynote. Thank you again, Jutta Treviranus, for your informative, provoking keynote. I hope to see + you all tomorrow. + Good bye! +
+ + + + \ No newline at end of file diff --git a/pages/about/projects/wai-coop/symposium2_day2_transcript.html b/pages/about/projects/wai-coop/symposium2_day2_transcript.html new file mode 100644 index 00000000000..f171d539e8e --- /dev/null +++ b/pages/about/projects/wai-coop/symposium2_day2_transcript.html @@ -0,0 +1,1620 @@ + + + + ++ >> CARLOS DUARTE: Hello, everyone, and welcome back to all of you who were with us yesterday. I am Carlos + Duarte. On behalf of the whole organizing team, I would like to welcome you to the second day of the Artificial + Intelligence and Accessibility Research Symposium. Welcome. This symposium is a joint effort by the WAI-CooP + Project and the Platform Architectures Working Group. I will start by taking this opportunity to thank the + European Commission that funds the WAI-CooP Project. + +
++ Before we get going, I want to give you some important reminders and a few points about the logistics of the meeting. By taking part in this symposium, you agree to follow the W3C Code of Ethics and Professional Conduct and to help promote a safe environment for everyone in this meeting. Also, today's session is being video-recorded and transcribed. The transcription will be posted on the symposium website later. If you object to being transcribed, we ask you to refrain from commenting. Audio and video are off by default. Please turn them on only if requested, and turn them off again when no longer needed. During the keynote, presentations and panel discussions, you can enter your questions using the Q&A tool on Zoom. Speakers will monitor the Q&A, and they might answer your questions either live, if time allows it, or directly in the system. Let me draw your attention to a couple of features in the Q&A tool. You can comment on and upvote any question in there. That will make it easier for us to address questions in the session. You can use the chat feature to report any technical issues you are experiencing; we will monitor the chat and try to assist you if needed. If during the seminar your connection drops, please try to reconnect. If the whole meeting is disrupted, you won't be able to reconnect; we will try to resume the meeting for a period of up to 15 minutes. If we are unsuccessful, we will contact you by email with further instructions.
++ Now, I will finish by introducing you to today's agenda. And we will start with the first of two panels. This + panel will focus on how machine learning or other AI techniques can be used in the context of Web Accessibility + evaluation. After a 10 minute coffee break, we will have our second panel. + This panel will address natural language processing, which we already discussed yesterday, but now from the + perspective of accessible communication. + And, finally, we will have our closing Keynote by Shari Trewin. Now, let's move onto the first panel. Let me + just stop sharing and invite our panelists to join. Turn their video on and join me. + +
++ >> CARLOS DUARTE: So, we have Willian Massami Watanabe, from the Universidade Tecnologica Federal do Parana in
+ Brazil. We have Yeliz Yesilada from the Middle East Technical University. We have Sheng Zhou from Zhejiang
+ University in China. I hope I pronounced it correctly. And Fabio Paterno from CNR-ISTI, HIIS Laboratory in Italy.
+ Okay. Thank you all for joining us. And for some of you, it is early in the morning. For others of you it is
+ later, well, for some of you, I guess, it is really late in the evening, so, thank you all for your
+ availability.
++ Let's start this discussion on how, I would say, current machine learning algorithms and current machine + learning applications can support or can improve methodologies for automatically assessing Web Accessibility. + And, from your previous works, you have touched different aspects about how this can be done. So, machine + learning has been used to support Web Accessibility Evaluation through different aspects, just a sampling, such + as metrics, such as evaluation prediction and handling dynamic pages. + I understand not all the domains you have worked on those, but some of you have worked on specific domains, so, + I would like you to focus on the ones that you have been working more closely in. For us to start, just let us + know, what are the current challenges that prevent further development and prevent further use of machine + learning or other AI techniques in these specific domains. Okay? I can start with you, Willian. +
++ >> WILLIAN WATANABE: First of all, thank you so much for everything that is being organized. Just to give you
+ some context, I am Willian, I'm a professor here in Brazil, where I work with web accessibility. My research
+ focus is on web technology, the ARIA specification, to be more specific, and just in regard to everything that
+ has been said by Carlos Duarte, my focus is on evaluation prediction, according to the ARIA specification. I
+ believe the main reason I was invited to this panel is my research on identification of web elements in web
+ applications. The problem I address is identifying components in web applications. When we implement web
+ applications, we use structured languages such as HTML. My job is to find what these elements in the HTML
+ structure represent in the web page. Like, they can represent some specific type of widget, there are some
+ components, some landmarks that we need to find on the web page, and this is basically what I do.
++ So, what I have been doing for the last year, I have been using machine learning for identifying these elements.
+ I use supervised learning and I use data provided by the DOM structure of the web application. I search for
+ elements in the web page and I classify them as an element or a widget or anything else. The challenges in
+ regard to that are kind of different from the challenges that were addressed yesterday.
+ Yesterday's applications of machine learning, I think, work with video and text that are unstructured data, so
+ they are more complicated, I would say. The main challenge that I address in my research is associated with data
+ acquisition, data extraction, identifying what kind of features I should use to identify those components in web
+ applications.
+ Associated with that I should say, to summarize, my problems are associated with the diversity of web
+ applications, there are different domains and this kind of bias in any dataset that we use. It is difficult for
+ me, for instance, to identify a number of websites that implement, that represent, all the themes of websites
+ that can be used in web applications, the variability in the implementation of HTML and JavaScript, and the use
+ of automatic tools to extract this data, such as the WebDriver API, the DOM structure dynamics, mutation
+ observers. There are a lot of specifications that are currently being developed that I must use, and I always
+ must keep observing to see if I can use them to improve my research.
+ And, lastly, there is always the problem of manual classification in AI for generating the datasets that I can
+ use. That is it, Carlos. Thank you.
+
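++ (As an aside, here is a minimal illustrative sketch of the kind of pipeline described above, not Willian's
+ actual system: it assumes the scikit-learn package, and the DOM features and training labels are invented.)
+ <pre><code class="language-python">
+ # Illustrative sketch: classify DOM elements as widget types from hand-crafted features.
+ # Assumes scikit-learn; the element descriptions and labels below are invented.
+ from sklearn.feature_extraction import DictVectorizer
+ from sklearn.ensemble import RandomForestClassifier
+
+ # Each element is described by features already extracted from the DOM
+ # (tag name, ARIA role, event handlers, number of children).
+ elements = [
+     {"tag": "div", "role": "button", "has_onclick": 1, "n_children": 0},
+     {"tag": "ul",  "role": "",       "has_onclick": 0, "n_children": 2},
+     {"tag": "li",  "role": "",       "has_onclick": 0, "n_children": 0},
+     {"tag": "a",   "role": "",       "has_onclick": 0, "n_children": 0},
+ ]
+ labels = ["button", "list", "listitem", "link"]  # invented training labels
+
+ vec = DictVectorizer(sparse=False)
+ X = vec.fit_transform(elements)
+ clf = RandomForestClassifier(n_estimators=50, random_state=0).fit(X, labels)
+
+ # Predict the widget type of a previously unseen element.
+ new_element = {"tag": "div", "role": "button", "has_onclick": 1, "n_children": 0}
+ print(clf.predict(vec.transform([new_element])))  # expected: ['button']
+ </code></pre>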
++ >> CARLOS DUARTE: Thank you, Willian. Thank you for introducing yourself, because I forgot to ask all of you to + do that. So, in your first intervention, please give us a brief introduction about yourself and the work you are + doing. So, Yeliz, I will follow with you. +
++ >> YELIZ YESILADA: Hi, everybody. Good afternoon. Good afternoon for me. Good afternoon, everybody. I'm Yeliz,
+ I'm an associate professor at Middle East Technical University, Northern Cyprus Campus. I've been doing
+ web accessibility research for more than 20 years now. Time goes really fast. Recently I have been exploring
+ machine learning and AI, specifically, for Web Accessibility, supporting Web Accessibility from different
+ dimensions.
++ Regarding the challenges, I think there are, of course, many challenges, but as Willian mentioned, I can + actually say that kind of the biggest challenge for my work has been data collection. So, I can actually say + that data, of course, is critical, as it was discussed yesterday in the other panels. Data is very critical for + machine learning approaches. + For us, collecting data, making sure that the data is representing our user groups, different user groups, and + not biasing any user groups, and also, of course, preparing and labeling the data, certain machine learning + algorithms, of course, supervised ones, require labeling. Labeling has also been a challenge for us, because + sometimes certain tasks, it is not so straightforward to do the labeling. It is not black and white. So, it has + been a challenge for us, I think, in that sense. +
++ And the other two challenges I can mention, I think the second one is the complexity of the domain. When you + think about the Web Accessibility, sometimes people think, oh, it is quite straightforward, but it is actually a + very complex domain. There are many different user groups, different user requirements, so, understanding those + and making sure that you actually address different users and different requirements is quite challenging. + And, since we also are working, this is last one that I wanted to mention, since we are also working with web + pages, they are complex. They are not well designed or well properly coded. As we always say, browsers are + tolerating, but for developing algorithms, machine learning algorithms, they also have to deal with those + complexities, which makes the task quite complex, I think. + So, just to wrap up, I think, in my work, there are three major challenges. Data, or the lack in quality of + data. Complexity of the domain, different users and different user requirements. And the complexity of the + resources we are using. So, web pages, the source codes and the complexity of pages that are not conforming to + standards. I think they are really posing a lot of challenges to algorithms that we are developing. + So, these are all I wanted to say. + +
++ >> CARLOS DUARTE: Thank you, Yeliz. A very good summary of major challenges facing everyone that works in this + field. So, thank you for that. Sheng, I wanted to go with you next. +
++ >> SHENG ZHOU: Thank you, Carlos. Hi, everyone. I am Sheng Zhou from Zhejiang University in China. From my point
+ of view, I think three challenges occur currently. First, I totally agree that it is hard to prepare labels
+ for model training, since the success of machine learning heavily relies on a large amount of labeled data;
+ however, collecting these labeled data usually takes a lot of time, which is hard to realize, especially in the
+ accessibility domain.
+ I want to take a, I am sorry, I am a little bit nervous here. Sorry. I want to take the WCAG rule on images of
+ text as an example, as we discussed in the panel yesterday. Most of the current image captioning or OCR methods
+ are trained on general image datasets, rather than the images, like logos, that are essential for text
+ alternatives, and the labels for a Web Accessibility solution should fully consider the experience of different
+ populations. There are very few datasets that are specifically designed for accessibility evaluation tasks and
+ satisfy these requirements.
+ So, the machine learning models that are trained, or traditional models, cannot be easily generalized to
+ accessibility evaluation. The second one, I think, is about web page sampling, since I have done a little bit of
+ work on this. I think currently there are many factors that affect the sampling.
+
++ First, sampling has been a fundamental technique in Web Accessibility Evaluation when dealing with millions of
+ pages. The previous page sampling methods are usually based on the features of each page, such as its elements
+ or the DOM structure. Pages with similar features are assumed to be generated by the same development framework
+ and to have similar accessibility problems.
+ However, with the fast growth of web development frameworks, pages are developed with diverse tools; for
+ example, pages that look very similar may be developed by totally different frameworks, and some pages that look
+ totally different may be developed by the same framework. This poses great challenges for feature-based web
+ accessibility evaluations. It is necessary to incorporate more factors into the sampling process, such as the
+ connection topology among pages, visual similarity, and typesetting. So, how to identify similarity between
+ pages, combining multiple factors into a unified sampling probability, is critical for sampling.
+ I think this is a problem that the literature on graph topology and metric learning could try to address, which
+ is a comprehensive research problem.
+
++ So, the third one, the third challenge, I think is the subjective evaluation rules. When we evaluate the Web + Accessibility, there are both subjective and objective rules, right? So, for example, when evaluating the WCAG + success criteria 1.4.5, image of text, the image is expected to be associated with accurate description texts + which has been discussed in the panel yesterday. + It is still challenging to verify the matching between the (speaking paused) + +
++ >> CARLOS DUARTE: I guess there are connection issues? Let's see. Okay. He has dropped. We will let Sheng, okay, + he is coming back, so, you are muted. +
++ >> SHENG ZHOU: Sorry. +
++ >> CARLOS DUARTE: It is okay. Can you continue? +
++ >> SHENG ZHOU: Okay, okay. I am so sorry. I think there are three challenges. The first challenge, the same as
+ Yeliz described, is that it is hard to…
++ >> CARLOS DUARTE: You dropped when you were starting to talk about the third challenge. +
++ >> SHENG ZHOU: Okay. +
++ >> CARLOS DUARTE: We got the first and second challenge. We heard that loud and clear, so now you can resume on + the third challenge. +
++ >> SHENG ZHOU: Okay, okay. So, the third challenge is the subjective evaluation rules. There are both subjective
+ and objective rules. For example, when evaluating the WCAG success criterion 1.4.5, Images of Text, the image is
+ expected to be associated with an accurate description text. As discussed in the panel yesterday, it is still
+ challenging to verify the matching between image and text, since we do not have access to the ground truth text
+ for the image. So, I think (video freezing)
++ >> CARLOS DUARTE: Apparently, we lost Sheng again. Let's just give him 10 seconds and see if he reconnects, + otherwise we will move on to Fabio. + Okay. So, perhaps it is better to move on to Fabio and get the perspective of also someone who is making an + automated accessibility evaluation tool available, so it is certainly going to be interesting, so, Fabio, can + you take it from here? + +
++ >> FABIO PATERNO: Yes. I am Fabio Paterno. I'm a researcher in the Italian National Research Council where I
+ lead the Laboratory on Human Interfaces in Information Systems. We have now a project funded by the National
+ Recovery and Resilience Plan which is about monitoring the accessibility of the public administration websites.
+ In this project we have our tool, MAUVE, which is an open, freely available tool, and it already has more than
+ 2000 registered users. Recently, we performed the accessibility evaluation of 10000 websites, considering a
+ sample of pages for each website; obviously it was an effort. So, we were very interested in understanding how
+ machine learning can help in this large-scale monitoring work. So, for this panel, I did a systematic
+ literature review, and I went to the ACM digital library, I entered machine learning and accessibility
+ evaluation to see what has been done so far. I got only 43 results, which is not too many, I would have expected
+ more, and only 18 actually applied machine learning, because the other works were more about how machine
+ learning could be interesting in future work and so on. That is to say, the specific research effort has so far
+ been limited in this area. And another characteristic was that there are various attempts. There are people
+ trying to predict web site accessibility based on the accessibility of some web pages, others trying to check
+ the rules for alternative descriptions, and others trying to let the user control the content areas. So, I would
+ say the challenge is, well, machine learning can be, you know, used as complementary support to the automatic
+ tools that we already have. In theory there are many opportunities, but in practice… there is still a lot of
+ progress to be made. The challenge I think is to find the relevant datasets, with the accessibility features
+ that are able to capture the type of aspect that we want to investigate.
++ And I would say the third and last main general challenge is that we really, really have to continuously work
+ with the changes: not only the web, but also how people implement and how people use the applications,
+ continuously changes. So, there is also the risk that the dataset will become obsolete, not sufficiently updated
+ to address all of that.
++ >> CARLOS DUARTE: Okay, thank you for that perspective. Sheng, I want to give you now the opportunity to finish + up your intervention. +
++ >> SHENG ZHOU: Okay. Thank you, Carlos. Sorry for the lagging here. So, I will continue with my third point
+ about the challenges. In my opinion, the third challenge is the subjective evaluation rules. In relation to Web
+ Accessibility, there are subjective and objective rules; for example, when evaluating an images of text
+ rule, the image is expected to be associated with an accurate description text. And as discussed in the panel
+ yesterday, it is still challenging to verify the matching between the image and the text, since there is no
+ ground truth for what kind of text should describe the image.
+ As a result, for an accessibility evaluation system it is harder to justify whether the alternative text really
+ matches the image. So, thanks.
+
++ >> CARLOS DUARTE: Okay, thank you. I will take it, from, I guess, most of you, well, all of you have in one way + or another mentioned one aspect of Web Accessibility Evaluation, which is conformance to requirements, to + guidelines. Several of you mentioned the web content Accessibility Guidelines in one way or another. Checking, + what we do currently, so far, and following up on what Sheng just mentioned, are objective rules. That is what + we can do so far, right? Then when we start thinking about, because the guidelines are themselves also subject + to subjectivity, unfortunately. How can we try to make the evaluation of those more subjective guidelines, or + more subjective rules, and how do you all think that Artificial Intelligence, algorithms, or machine + learning-based approaches can help us to assess conformance to those technical requirements to Accessibility + Guidelines? Okay? + I will start with you, now, Yeliz. + +
++ >> YELIZ YESILADA: Thank you, Carlos. So, regarding the conformance testing, so, maybe we can actually think of
+ this as two kinds of problems. One is the testing, the other one is, basically, repairing, or
+ automatically fixing the problems.
+ So, I see, actually, that machine learning and AI in general can, I think, help on both sides, in both parts.
+ So, regarding the testing and auditing, if we take, for example, the WCAG Evaluation Methodology as the most
+ systematic methodology to evaluate for accessibility, it includes, for example, five stages, five steps. So, I
+ think machine learning can actually help us in certain steps.
+
++ For example, it can help us to choose a representative sample, which is the third step in WCAG-EM. We are + currently doing some work on that, for example, to explore how to use unsupervised learning algorithms to + decide, for example, what is a representative sample. Fabio, for example, mentioned the problem of evaluating a + large-scale website with millions of pages. + So, how do you decide, for example, which ones to represent, I mean, which ones to evaluate. Do they really, for + example, if you evaluate some of them, how much of the site you actually cover, for example. So, there, I think, + machine learning and AI can help. As I said, we are currently doing some work on that, trying to explore machine + learning algorithms for choosing representative samples, making sure that the pages that you are evaluating + really represent the site, and reduces the workloads, because evaluating millions of pages is not an easy task, + so maybe we can pick certain sample pages. + +
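++ (As an aside, a minimal sketch of the representative-sampling idea described above, assuming scikit-learn; the
+ page names and structural feature counts are invented. Pages are clustered by rough structural features and the
+ page closest to each cluster centre is chosen for auditing.)
+ <pre><code class="language-python">
+ # Illustrative sketch: choose a representative sample of pages with unsupervised learning.
+ import numpy as np
+ from sklearn.cluster import KMeans
+
+ # Each row roughly describes one page, e.g. counts of forms, tables, images, links
+ # (invented numbers for illustration).
+ pages = ["home", "news", "article-1", "article-2", "contact"]
+ X = np.array([[1, 0, 12, 40],
+               [0, 2, 30, 80],
+               [0, 1, 25, 75],
+               [0, 1, 27, 70],
+               [3, 0,  2, 10]], dtype=float)
+
+ k = 3  # size of the representative sample to audit manually
+ kmeans = KMeans(n_clusters=k, n_init=10, random_state=0).fit(X)
+
+ sample = []
+ for centre in kmeans.cluster_centers_:
+     nearest = int(np.argmin(np.linalg.norm(X - centre, axis=1)))
+     sample.append(pages[nearest])
+
+ print("Representative sample to audit:", sample)
+ </code></pre>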
++ Once we evaluate them, we can transfer the knowledge from those pages to the other ones, because more or less + the pages these days are developed with templates or automatically developed, so, maybe we can transfer the + errors we identified, or the ways we are fixing to the others which are representative. + Regarding the step four in WCAG-EM, that is about auditing the select sample, so how do you evaluate as test the + sample, I think in that part, as we all know, and Sheng mentioned, there are a lot of subjective rules which + require human testing. So, maybe there we need to explore more how people, I mean, how humans evaluate certain + requirements, and how we can actually automate those processes. So, can we have machine learning algorithms that + learn from how people evaluate and assess and implement those. But, of course, as we mentioned in the first + part, data is critical, valid data, and quality of data is very critical for those parts. + +
++ Regarding the repairing, or automatically fixing certain problems, I also think that machine learning algorithms + can help. For example, regarding the images Sheng mentioned, we can automatically test whether there is an Alt + Text or not, but not the quality of the Alt Text, so maybe there we can explore more, do more about + understanding whether it is a good Alt Text or not, and try to fix it automatically by learning from the context + and other aspects of the site. + Or, I have been doing, for example, research in complex structures like tables. They are also very difficult and + challenges for accessibility, for testing and for repairing. We have been doing, for example, research in + understanding whether we can differentiate, and learn to differentiate a layout table from a data table, and if + it is a complex table, can we actually, for example, learn how people are reading that and guiding the repairing + of those. + We can, I guess, also do similar things with the forms. We can learn how people are interacting with these forms + and complex structures with the forms like reach and dynamic content like Willian is working on. Maybe we can, + for example, do more work there to automatically fix, which can be encoded in, let's say, authoring tools or + authoring environments, that include AI, without the developers noticing that they are actually using AI to fix + the problems. + So, I know I need to wrap up. I think I would say contributing two things, both testing and repairing can help. + +
++ >> CARLOS DUARTE: I agree. Some of the things you mentioned, they can really be first steps. We can assist a + human expert, a human evaluator, and take away some of the load. That is also what I take from the intervention. + So, Fabio, I would like your take on this, now. + +
++ >> FABIO PATERNO: I mean, I think, as Yeliz said before, we have to be aware of the complexity of
+ accessibility evaluation. Because just think about WCAG 2.1. It is composed of 78 success criteria, which are
+ associated with hundreds of techniques, specific validation techniques. So, this is the current state, and it
+ seems like the number of techniques is going to increase and so on. So, the automatic support is really
+ fundamental.
+
+
++ And, secondly, when you use automatic support, the results of a check can be: this passes, this fails, or
+ cannot tell. So, one possibility that I think would be interesting is to explore machine learning in the
+ situation in which the automatic solution is not able to deterministically provide an ok or fail; this could be
+ an interesting opportunity to also explore in other European projects. Ideally this would involve a group of
+ human accessibility experts, in this case, to provide the input, and then try to use this input
+ to train an intelligent system. It has not yet been possible to validate these solutions, but for sure, it
+ might be really easy for AI to detect whether an alternative description exists, while it is much more difficult
+ to say whether it is meaningful.
++ So, in this case, for example, I have seen a lot of improvement of AI in recognizing images and their content,
+ I have also seen some of (Muffled audio). You can think of a situation in which AI provides the
+ descriptions and then there is some kind of similarity checking between these automatically generated
+ descriptions and the ones provided by the developer, to see to what extent these are meaningful.
+ This is something I think is possible; what I'm not sure about is whether we can find a general solution. I can
+ see this kind of AI associated with some level of confidence, and then I think part of the solution is to let
+ the user decide what level of confidence is acceptable when this automatic support is used to understand
+ whether the description is meaningful. So that would be the direction where I would try, from the
+ perspective of people working on tools for automatic evaluation, trying to introduce AI inside such an
+ automatic framework. But another key point we have to be aware of is the transparency. When we are talking about
+ AI, we are talking about a black box; there is a lot of discussion about explainable AI. Some people say AI is
+ not able to explain why this data generated this result, or how we can change it to obtain different results
+ (Muffled audio), so this is a question that people encounter when they happen to run an evaluation tool.
+
++ And also, in addition, there is a study about the transparency of the tools that are now available; it was
+ published in ACM Transactions on Accessible Computing, showing that often these tools are a little bit black
+ boxes, they are not sufficiently transparent. For example, they say, we support these success criteria, but they
+ do not say which techniques they actually apply, or how these techniques are implemented. So, it says that often
+ the users are at a disadvantage because they use different tools and get different results, and they do not
+ understand the reason for such differences.
+
+ Let's say this point of transparency is an issue already now, with validation tools that do not use AI. We
+ have to be careful that if AI is added, it should be added in such a way that it is explainable, so we can help
+ people to better understand what happened in the evaluation and not just give the results without any sufficient
+ explanation.
+
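++ (As an aside, a minimal sketch of the similarity-checking idea Fabio describes, with a user-chosen confidence
+ threshold. It assumes the sentence-transformers package; generate_caption() is a placeholder standing in for
+ any image-captioning model, and the image path, alt text and threshold are invented.)
+ <pre><code class="language-python">
+ # Illustrative sketch: compare an author-provided alt text with an AI-generated caption.
+ from sentence_transformers import SentenceTransformer, util
+
+ def generate_caption(image_path):
+     # Placeholder: a real system would run an image-captioning model here.
+     return "a dog running on a beach"
+
+ def alt_text_check(image_path, author_alt, threshold=0.6):
+     # The threshold is deliberately user-configurable: evaluators decide what
+     # level of confidence is acceptable, as suggested above.
+     model = SentenceTransformer("all-MiniLM-L6-v2")  # downloads the model on first use
+     embeddings = model.encode([generate_caption(image_path), author_alt])
+     similarity = float(util.cos_sim(embeddings[0], embeddings[1]))
+     return similarity, "pass" if similarity >= threshold else "cannot tell"
+
+ print(alt_text_check("photo.jpg", "Dog playing by the sea"))
+ </code></pre>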
++ >> CARLOS DUARTE: I think that is a very important part, because if I am a developer, and I am trying to solve + accessibility issues, I need to understand why is there an error, and not just that there is an error. That is a + very important part. Thank you, Fabio. + So, Sheng, next, to you. + +
++ >> SHENG ZHOU: Thanks. Incorporating artificial intelligence, I will try to find some ways to help the
+ developers. First of all is code generation for automatically fixing the accessibility problems. As Yeliz
+ just said, web accessibility evaluation has always been targeted, but we also have to take the view of the
+ developers.
+ If the evaluation system only identifies or locates the accessibility problem, it may still be hard for
+ developers to fix these problems, since some developers may lack experience on this. Recently, artificial
+ intelligence based code generation has been well developed, and given some historical code for fixing
+ accessibility problems, we have tried to train an artificial intelligence model to automatically detect the
+ problem, generate a code snippet that fixes the problematic code, and provide suggestions for the developers. We
+ expect this function could help the developers fix the accessibility problems and improve the websites more
+ efficiently.
+
++ And the second way to help the developer is content generation. As discussed in the panel yesterday, there
+ have been several attempts at generating text for images or videos with the help of computer vision and
+ NLP techniques. It may not be very practical for the image providers to provide an alt text since the
+ state-of-the-art methods require large models deployed on GPU servers, which is not convenient for frequently
+ updated images.
+ Recently we have been working on some knowledge distillation methods, which aim at distilling a lightweight
+ model from a large model. We want to develop a lightweight model that can be deployed in a browser
+ extension, or some lightweight software. We hope to reduce the time cost and computation cost for image
+ providers and encourage them to conform to the accessibility techniques or requirements. Okay. Thank you.
+
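++ (As an aside, a minimal sketch of the knowledge distillation Sheng mentions, assuming PyTorch; the teacher and
+ student here are toy models and the features and labels are random placeholders, not a real captioning system.)
+ <pre><code class="language-python">
+ # Illustrative sketch: distill a lightweight "student" from a larger "teacher" model.
+ import torch
+ import torch.nn.functional as F
+
+ teacher = torch.nn.Sequential(torch.nn.Linear(512, 256), torch.nn.ReLU(),
+                               torch.nn.Linear(256, 100))   # large teacher head
+ student = torch.nn.Sequential(torch.nn.Linear(512, 100))   # lightweight student
+
+ optimizer = torch.optim.Adam(student.parameters(), lr=1e-3)
+ T = 2.0       # temperature: softens the teacher's output distribution
+ alpha = 0.7   # weight of the distillation term versus the hard-label term
+
+ features = torch.randn(8, 512)          # dummy image features
+ labels = torch.randint(0, 100, (8,))    # dummy ground-truth classes
+
+ with torch.no_grad():
+     teacher_logits = teacher(features)
+
+ student_logits = student(features)
+ distill = F.kl_div(F.log_softmax(student_logits / T, dim=-1),
+                    F.softmax(teacher_logits / T, dim=-1),
+                    reduction="batchmean") * (T * T)
+ hard = F.cross_entropy(student_logits, labels)
+ loss = alpha * distill + (1 - alpha) * hard
+
+ loss.backward()
+ optimizer.step()
+ </code></pre>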
++ >> CARLOS DUARTE: Thank you. That is another very relevant point. Make sure that whatever new techniques we + develop are really accessible to those who need to use them. So the computational resources are also a very + important aspect to take into account. So, Willian, your take on this, please. +
++ >> WILLIAN WATANABE: First, I would like to take from what Yeliz said, that we have basically, it is nice to see
+ everyone agreeing, before we didn't talk at all, so it is nice to see that everyone is having the same
+ problems. And, about what Yeliz said, she divided the work on automatic evaluation into two steps. The first
+ one is testing, and the second one is automatically repairing accessibility in websites.
+ From my end, specifically, I don't work with something, I would say, subjective, like image content generation.
+ My work mostly focuses on identifying widgets, it is kind of objective, right? It is a dropdown, it is not a
+ tooltip… I don't need to worry about being sued over a bad classification, or something else. So, that is a
+ different aspect of accessibility that I work on.
+ Specifically, I work with supervised learning, as everyone else does: I classify the elements as a specific
+ interface component. I use features extracted from the DOM structure to, I think everyone mentioned this, Sheng
+ mentioned it, as well, Yeliz mentioned the question about labels and everything else.
+
++ I am trying to use data from websites that I evaluate as accessible to enhance the accessibility of websites
+ that don't have these requirements. For instance, I see a website that implements rules, that
+ implements the ARIA specification. So, I use it. I extract data from it to maybe apply it on a website that
+ doesn't. This is the kind of work that I am doing right now.
+
+
++ There is another thing. So, Fabio also mentioned the question about confidence. I think this is critical for us. + In terms of machine learning, I think the word that we use usually is accuracy. What will guide us, as + researchers, whether we work on test or automatically repair is basically the accuracy of our methodologies. If + I have a lower accuracy problem, I will use a testing approach. Otherwise, I will try to automatically repair + the web page. Of course, the best result we can get is an automatic repair. This is what will scale better for + our users, ultimately offer more benefit in terms of scale. + I think that is it. Everyone talked about everything I wanted to say, so this is mostly what I would say + differently. This is nice. +
++ >> CARLOS DUARTE: Okay. Let me just, a small provocation. You said that, in your work, everything that you work + with widget identification is objective. I will disagree a little bit. I am sure we can find several examples of + pages where you don't know if that is a link or a button, so there can be subjectivity in there, also. So, yes. + But just a small provocation, as I was saying. +
++ So, we are fast approaching, the conversation is good. Time flies by. We are fast approaching the end. I would + ask you to quickly comment on the final aspect, just one minute or two, so please try to stick to that so that + we don't go over time. + You have already been in some ways approaching this, but just what do you expect, what would be one of the main + contributions, what are your future perspectives about the use of machine learning techniques for web + accessibility evaluation. I will start with you now, Fabio. +
++ >> FABIO PATERNO: Okay. If I think about a couple of interesting, you know, possibilities opened up by
+ machine learning. When we evaluate a user interface, generally speaking we have two possibilities. One is to
+ look at the code associated with the generated interface and see whether it is compliant with some rules. And
+ another approach is to look at how people interact with the system. So, look at the logs of user interaction.
+ In the past we did some work where we created a tool to identify various usability patterns, which means
+ patterns of interaction that highlight that there is some usability problem. For example, we looked at mobile
+ devices where there's a lot of work on (?) machine, that means that probably the information is not well
+ presented, or people access computers in different (?) it means the (?) are too close. So, it is possible to
+ identify a sequence of interactions that highlights that there is some usability problem. So, one possibility is
+ to use some kind of machine learning for classifying interaction with some Assistive Technology that highlights
+ this kind of problem. So, that allows us to tell from the data (?), yes, there are specific accessibility
+ problems.
++ And the second one is about, we mentioned before, the importance of providing an explanation about a problem,
+ why it is a problem, and how to solve it. So, that would be, in theory, an ideal application for a
+ conversational agent. Now there is a lot of discussion on this, about ChatGPT, but it is very difficult to
+ actually design, in this case, a conversational agent that is able to take into account the relevant context,
+ which in this case is the type of user that is actually asking for help. Because there are really many types of
+ users who look at accessibility results: that can be a web commissioner, the person who decides to have a
+ service but doesn't know anything about its implementation, then the end user, the developer, the accessibility
+ expert. Each of them requires a different language, different terms, a different type of explanation, even when
+ they simply ask, “is this website accessible?”. They really have different criteria in order to understand the
+ level of accessibility and how to operate on it in order to improve it. So, this is one dimension of the
+ complexity.
++ The other dimension of the complexity is the actual implementation. It is really not, this (?) we are conducting
+ in our laboratory (?). It is really amazing to see how different the implementation languages and technical
+ components that people use in order to implement websites are. Even people that use the same JavaScript
+ frameworks can use them in very different ways. So, when you want to provide an explanation, of course, there is
+ no point in just providing the standard description of the error and some of the standard examples of how to
+ solve the problem, because often there are different situations that require some specific consideration for
+ explaining how, or what, can be done. But such a conversational agent for accessibility, complex as it is, would
+ be a great result.
++ >> CARLOS DUARTE: Thank you, Sheng? +
++ >> SHENG ZHOU: For the sake of time, I will talk about the future perspective of efficient page sampling.
+ According to our data analysis, we found that web pages with similar connection structures to
+ other pages usually have similar accessibility problems.
+ So, we try to take this into account for the accessibility evaluation. And recently we used graph neural
+ networks, which have been a hot research topic in the machine learning community. They combine both the network
+ topology and the node attributes into a unified representation for each node. Each node (frozen video)
+
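++ (As an aside, a minimal sketch of the idea Sheng was describing before the connection dropped: a single toy
+ graph-convolution step in plain numpy that mixes link topology with per-page attributes. The adjacency matrix,
+ attribute values and the untrained random weights are all invented for illustration.)
+ <pre><code class="language-python">
+ # Illustrative sketch: combine site link structure and page attributes into one embedding.
+ import numpy as np
+
+ # Adjacency of a tiny site graph (who links to whom) and per-page attribute vectors.
+ A = np.array([[0, 1, 1, 0],
+               [1, 0, 0, 1],
+               [1, 0, 0, 1],
+               [0, 1, 1, 0]], dtype=float)
+ X = np.array([[12, 3], [30, 0], [28, 1], [2, 9]], dtype=float)  # e.g. #images, #forms
+
+ A_hat = A + np.eye(len(A))                                  # add self-loops
+ D_inv_sqrt = np.diag(1.0 / np.sqrt(A_hat.sum(axis=1)))      # degree normalization
+ W = np.random.randn(X.shape[1], 2) * 0.1                    # weights (random, untrained)
+
+ # One graph-convolution layer: each page representation mixes its own attributes
+ # with those of the pages it is linked to.
+ H = np.maximum(D_inv_sqrt @ A_hat @ D_inv_sqrt @ X @ W, 0)
+ print(H)  # unified embeddings that a sampler could cluster or compare
+ </code></pre>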
++ >> CARLOS DUARTE: Okay. I guess we lost Sheng again. In the interest of time, we will skip immediately to you, + Willian. +
++ >> WILLIAN WATANABE: Okay. My take on this, I think it will be pretty direct. I think Fabio talked about it, but
+ we are all working with specific guidelines, a set of Accessibility Guidelines, the WCAG. And I think the next
+ step that we should address is associated with generalization, and incorporating it into relevant products, like
+ any automatic evaluation tool. So, in regard to all the problems that we mentioned, data
+ acquisition, manual classification, we have to find a way to scale our experiments so that we can guarantee it
+ will work on any website.
++ In regard to my work, specifically, I am trying to work on automatic generation of structure for
+ websites, for instance, generating heading structures and other specific structures that users can use,
+ and automatically enhance the accessibility of the web page. I think that is it. In regard to what
+ you said, Carlos, just so that I can clarify myself, what I wanted to say is that, different from the panelists
+ from yesterday, and different from Chao, for instance, I think I am working with a simpler machine learning
+ approach. I don't use deep learning, for instance, since I don't see the use for it yet in my research, because
+ for my research, I think, as was mentioned, it might be used for labeling and other stuff, data generation. I
+ haven't reached that point yet. I think there are a lot of things we can do just with classification, for
+ instance. That is it.
++ >> CARLOS DUARTE: Okay, thank you, Willian. Yeliz, do you want to conclude? +
++ >> YELIZ YESILADA: Yes. I actually, at least I hope, that we will see developments, again, in two things. I
+ think the first one is automated testing. I think we are now at the stage that we have many tools and we know
+ how to implement and automate, for example, certain guidelines, but there are a bunch of others that are
+ very subjective; they require human evaluation. It is very costly and expensive, I think, from an evaluation
+ perspective.
+ So, I am hoping that there will be developments in machine learning and AI algorithms to support and have more
+ automation in those ones that really now require a human to do the evaluations.
+ And the other one is about the repairing. So, I am also hoping that we will also see developments in automating
+ the fixing of problems, learning from the good examples, and being able to develop
+ solutions so that, while the pages are developed, they are actually automatically fixed.
+ And, sometimes, maybe seamlessly to the developers so that they are not worried about, you know, certain issues.
+ Of course, explainability is very important, to explain to developers what is going on. But I think automating
+ certain things there would really help. Automating the repair. Of course, to do that I think we need
+ datasets. Hopefully in the community we will have shared datasets that we can all work with and explore
+ different algorithms. As we know, it is costly. So, exploring and doing research with existing data helps a
+ lot.
+
++ So, I am hoping that in the community we will see public datasets. And, of course, technical skills are very + important, so human-centered AI I think is needed here and is also very important. So hopefully we will see more + people contributing to that and the development. And, of course, we should always remember, as Jutta mentioned + yesterday, the bias is critical. + When we are talking about, for example, automatically testing, automating the test of certain rules, we should + make sure we are not bias with certain user groups, and we are really targeting everybody in different user + groups, different needs and users. + So, that is all I wanted to say. + +
++ >> CARLOS DUARTE: Thank you so much, Yeliz. And also, that note I think is a great way to finish this panel. + So, thank you so much, the four of you. It is really interesting to see all those perspectives and what you are + working on and what you are planning on doing in the next years, I guess. + +
++ Let me draw your attention. There are several interesting questions in the Q&A. If you do have a chance, try to
+ answer them there. We, unfortunately, didn't have time to get to those during our panel. But I think there are
+ some that really have your names on them. (Chuckles) So, you are exactly the correct persons to answer those. So,
+ once again, thank you so much for your participation. It was great.
++ We will now have a shorter break than the ten minutes. And we will be back in 5 minutes. So, 5 minutes past the + hour. +
++ >> CARLOS DUARTE: Hello, everyone. Welcome back to the second panel. I am now joined by Chaohai Ding from the + University of Southampton, Lourdes Moreno of the Universidad Carlos III de Madrid in Spain, and Vikas Ashok from + the Old Dominion University in the US. It is great to have you here. As I said before, let's bring back the + topic of natural language processing. + We have addressed yesterday, but not from the perspective of how it can be used to enhance Web Accessibility on + the web. + +
++ So, now, similarly to what I’ve done in the first panel, you have been working on different aspects of this + large domain of accessible communication. You have pursued advances in machine translation, in Sign Language, + AAC, so from your perspective and your focus on the work, what are the current challenges that you have been + facing and that are preventing the next breakthrough, I guess. + Also, I would like to ask you to, for your first intervention, also, to do a brief introduction to yourself and + what you have been doing. Okay? + I can start with you, Chaohai. + +
++ >> CHAOHAI DING: Hi. Thank you for having me today. I am a senior research fellow at the University of
+ Southampton. My research interest is in AI and inclusion, which includes Data Science and AI techniques to
+ enhance accessible learning, travelling and communication.
+ So, yes, AI has been widely used in our research to support accessible communication. Currently we are
+ working on several projects on AAC. For example, we applied a concept map, a kind of symbol knowledge graph, to
+ interlink AAC symbols from different symbol sets. This can be used for symbol-to-symbol translation. And we
+ also adapted an NLP model to translate AAC symbol sequences into spoken text sequences.
+
++ So, those are the two projects we are working on currently. We are also working on an accessible e-learning + project that applies the machine translation to provide transcripts from English to other languages for our + international users. So, that is another scenario we are working with machine translation for accessible + communication. + So, there are a few challenges we have identified in our kind of research. The first one is always the data. + Data availability and data optimality. So, as you know, NLP models are a large amount of data, especially for + AAC. + +
++ We are, well, one of the biggest challenges is the lack of data, like user data, AAC data, and also how the user
+ interacts with the AAC. So, also, we have several different AAC symbol sets used by different individuals,
+ which makes it very difficult to develop NLP models as well, because the AAC symbols are separate in each
+ symbol set. And another challenge is the lack of data interoperability across AAC symbol sets.
+ The third challenge we have identified is inclusion, because we are working on AAC symbol sets in Arabic,
+ English and Chinese. So, there are cultural and social differences in AAC symbols, and it is important to
+ consider the needs of different user groups under these cultural and social factors and to involve them in the
+ development of NLP models for AAC.
+
++ The next one is data privacy and safety. This has been identified in our web application that goes from AAC
+ symbols to spoken text. So, if we want a more accurate or more personalized application, we need the user's
+ information. So, the challenge is how we store this personal information, how we prevent data
+ misuse and breaches, and how to make the tradeoff between using the user's information and the model performance.
++ The last one is always the accessible user interface, and how to make this AI power tool, and NLP power tools + accessible for end users. And also there are more generic issues in AI like accountability, explainability. So I + think that is the list of challenges we have identified in our current research. Thank you. +
++ >> CARLOS DUARTE: Thank you. A great summary of definitely some of the major challenges that are spread across + the entire domain. Definitely. Thank you so much. Lourdes, do you want to go next? +
++ >> LOURDES MORENO: Thank you. Thank you for the invitation. Good afternoon, everyone. I am Lourdes Moreno, I
+ work as an Associate Professor in the Computer Science Department at the Universidad Carlos III de Madrid in
+ Spain. I am an accessibility expert. I have been working in the area of technology for disability for 20 years.
+ I have previously worked on sensory disability, and currently I work on cognitive accessibility. In my research,
+ I combine methods from the Human Computer Interaction and Natural Language Processing areas to obtain
+ accessible solutions from the point of view of the readability and understandability of the language in the user
+ interface.
++ So, current natural language research is being driven by large language models. In recent
+ years there have been many advances due to the increase in resources, such as large datasets and cloud platforms
+ that allow the training of large models. But the most crucial factor is the use of Transformer technology and
+ the use of transfer learning. These are methods based on deep learning to create language models based on
+ neural networks. They are universal models that support different natural language processing tasks, such as
+ question answering, translation, summarization, speech recognition, and more. The most extensively used
+ models are GPT from OpenAI and BERT from Google. But new and bigger models continue to appear and
+ outperform previous ones, because their performance continues to scale as more parameters are added to the
+ models and more data are added.
++ However, despite these great advances, there are issues in the accessibility scope and challenges to address.
+ One of them is bias: language models have different types of bias, such as gender, race, and disability bias.
+ Gender and race biases are highly analyzed; however, that isn't the case with disability bias, which has been
+ relatively under-explored.
+ There are studies related to these models. For example, in these works, in sentiment analysis of text, terms
+ related to disability receive a negative value. Or, in another work, a model used to moderate conversations
+ classifies text with mentions of disability as more toxic. That is, algorithms are trained to give results that
+ can be offensive and cause disadvantage to individuals with disabilities. So, investigation is necessary to
+ study these models and reduce biases. We cannot just take a language model and directly use the outcome.
+
++ Another problem with these models is that there aren't too many datasets related to the accessibility area. At
+ this time there are few labeled corpora to be used in training simplification algorithms, for lexical or
+ syntactic simplification, in natural language processing. I work on cognitive accessibility in Spanish, to
+ simplify text into plain language, easy-to-read language. To carry out this task we have created a corpus with
+ experts in easy reading and with the participation of older people and People with Disabilities, intellectual
+ disabilities, because the current corpora have been created with non-experts in disability and non-experts in
+ plain language, and they haven't taken into account people with disabilities. Also, efforts devoted to solving
+ the scarcity of resources are required for languages with low resources. English is the language with the most
+ developed natural language processing resources, but others, such as Spanish, don't have many resources. We need
+ systems trained not only for the English language but for Spanish as well.
+ Finally, with the proliferation of GPT models and their applications, such as ChatGPT, another problem to
+ address is the regulation and the ethical aspects of Artificial Intelligence.
+
++ >> CARLOS DUARTE: Thank you so much, Lourdes. Definitely some very relevant challenges in there. Vikas, I will + end this first talk with you. +
++ >> VIKAS ASHOK: Thank you. I'm Vikas Ashok, from Old Dominion University, Virginia, in the United States. I have + been working, researching in the area of accessible computing for ten years now. My specialty focus area is + people with visual disabilities, so mostly concentrated on their accessibility, as well as usability needs, when + it comes to computer applications. +
++ So, with the topic at hand, which is accessible communication, one of the projects that I am currently
+ looking at is understandability of Social Media content for people who listen to content, such as, you know,
+ people who are blind. So, listening to Social Media content text is not the same as looking at it. So, even
+ though the Social Media text is accessible, it is not necessarily understandable because of the presence of a
+ lot of non-standard language content in Social Media, such as Twitter. People create their own words, they are
+ very inventive there. They hardly follow any grammar. So, text-to-speech systems, such as those used in screen
+ readers, cannot necessarily pronounce these out-of-vocabulary words in the right way.
+ Because most of the words, even though they are in text form, are mostly intended for visual consumption,
+ with some type of exaggeration where the letters are duplicated just for additional effect. Sometimes emotions
+ are attached to the text itself, without any emoticons or anything else. And sometimes, to phonetically match
+ it, a different spelling of the word is used just for fun.
+
++ So, as communication increases, tremendously with social media, people are depending on social media to + understand or getting news even, you know, some kind of disaster news or if something happens anywhere, some + event, they first flock the social media to get it. So, people that listen to content should also be able to + easily understand. I am focusing on that area, how to use NLP to make this possible. Even though this is not + exactly a question of accessibility in a conventional sense, but it is more like accessibility in terms of being + able to understand the already accessible content. So, it is one of the things. +
++ The other thing I am looking at that is related to this panel is the disability bias of natural language models,
+ especially those Large Language Models. So, unfortunately, these models are reflective of the data they are
+ trained on, because in most of the data, words that are used to describe People with Disabilities somehow end up
+ having negative connotations; they appear in negative contexts. Nobody is telling the models to learn it that
+ way, except that the documents of the text corpora that these models are looking at inherently put these words,
+ which are many times not offensive, into the negative category.
++ So, I am looking at how we can counter this. The example is toxicity detection in discussion forums; online
+ discussion forums are very popular. People go there, sometimes anonymously, post content, interact with each
+ other. You know, some of the posts get flagged as toxic, or they get filtered out, even if they
+ are not toxic, because of the use of certain words to describe disabilities or something. So, we want to avoid
+ that. How can we use NLP to not do that? These two projects are the ones closely related to the panel,
+ specifically to this session.
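++ (As an aside, a minimal sketch of how such disability bias can be probed, not Vikas's actual method: template
+ sentences with and without disability terms are scored and the score gap is compared. toxicity_score() is a
+ crude stand-in for a real toxicity classifier, and the templates and word lists are invented.)
+ <pre><code class="language-python">
+ # Illustrative sketch: probe a toxicity classifier for disability bias.
+ def toxicity_score(sentence):
+     # Stand-in scorer: a real system would call a trained toxicity model here.
+     flagged = {"blind", "deaf", "wheelchair", "autistic"}
+     return 0.8 if any(w in sentence.lower() for w in flagged) else 0.1
+
+ templates = [
+     "I am a {} person and I love hiking.",
+     "My friend is {} and writes great reviews.",
+ ]
+ disability_terms = ["blind", "deaf", "autistic"]
+ neutral_terms = ["tall", "young", "left-handed"]
+
+ def mean_score(terms):
+     scores = [toxicity_score(t.format(term)) for t in templates for term in terms]
+     return sum(scores) / len(scores)
+
+ gap = mean_score(disability_terms) - mean_score(neutral_terms)
+ print(f"Toxicity gap attributable to disability terms: {gap:.2f}")
+ </code></pre>
+ A large gap on such harmless templates would indicate exactly the kind of bias described above, since none of
+ the sentences are actually toxic.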
++ >> CARLOS DUARTE: Thank you, Vikas. I will follow up with that, with what you mentioned and Lourdes has also + previously highlighted, the disability bias. I am wondering if you have any ideas, suggestions on how can NLP + tools address such issues, I'm thinking for instance, text summarization tools, but also other NLP tools. How + can they help us address the issues of disability bias, also how can they explore other aspects like + accountability or personalization, in the case of text summaries. How can I personalize a summary for specific + audiences, or for the needs of specific people. I will start with you, Lourdes. +
++ >> LOURDES MORENO: Text summarization is a natural language task and a great resource because it improves
+ cognitive accessibility, helping people with disabilities to process long and tedious texts. Also, in the Web
+ Content Accessibility Guidelines, following success criterion 3.1.5 Reading Level, a readable summary is
+ especially recommended. But these tasks have challenges, such as disability biases and generated summaries that
+ are not understandable for people with disabilities. Therefore, some aspects must be taken into
+ account. It is necessary to approach these tasks with extractive summaries, where the extracted sentences
+ can be modified with paraphrasing resources to help the understandability and readability of the text.
+ To summarize, different inputs are required: not only knowledge about the sequences of words, not only about
+ sentences, but also about the targeted audience. Different types of users require different types
+ of personalization of summaries.
+
++ Also, I think it would be advisable to include a readability metric in the summary generation
+ process to ensure that the resulting summary is minimally readable. For instance, if we are in the context of an
+ assistant that provides summaries of public administration information for all people, it is necessary to take
+ into account that the summary must be in plain language. Therefore, in addition to extracting relevant sentences
+ and paraphrasing, it will be necessary to include knowledge about plain language guidelines to make the text
+ easier to read.
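++ (As an aside, a minimal sketch of using a readability metric as a gate on candidate summaries. It computes the
+ standard Flesch Reading Ease formula with a crude syllable heuristic; the candidate sentences and the threshold
+ of 60 are invented, and a production system for Spanish would use a metric tuned to that language.)
+ <pre><code class="language-python">
+ # Illustrative sketch: keep only candidate summaries that are readable enough.
+ import re
+
+ def count_syllables(word):
+     # Rough heuristic: count groups of vowels.
+     return max(1, len(re.findall(r"[aeiouy]+", word.lower())))
+
+ def flesch_reading_ease(text):
+     sentences = max(1, len(re.findall(r"[.!?]+", text)))
+     words = re.findall(r"[A-Za-z']+", text)
+     syllables = sum(count_syllables(w) for w in words)
+     return 206.835 - 1.015 * (len(words) / sentences) - 84.6 * (syllables / len(words))
+
+ candidates = [
+     "The municipality will henceforth facilitate remuneration adjustments annually.",
+     "The city will change how much it pays people once a year.",
+ ]
+ readable = [c for c in candidates if flesch_reading_ease(c) >= 60]
+ print(readable)  # only the plainer sentence passes the threshold
+ </code></pre>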
++ Finally, corpora used to train natural language processing assistants should be tested with users in order to
+ obtain a useful solution. Only then will it be possible to obtain understandable summaries for all of society,
+ including the elderly.
+ Then, with respect to accountability, as with every Artificial Intelligence algorithm, it must be explainable.
+ So, it is necessary to answer questions such as how the processing is actually performed, the limitations of the
+ datasets used to train and test the algorithms, and the outcomes of the model.
+ Therefore, good data management and machine learning model training practices should be promoted to ensure
+ quality results. Nothing else.
+
++ >> CARLOS DUARTE: Thank you, Lourdes. Vikas, do you want to, even though from what I understand you don't work + directly with text summarization, but how does this aspect of disability bias accountability, and + personalization impact what you are doing? +
++ >> VIKAS ASHOK: I use a lot of text summarization, so I can add to it. To add to what Lourdes said,
+ simplification is also as important as summarization, because sometimes it is not just summarizing, or
+ shortening the content to be consumed, but also making it understandable, like I said. It means that certain
+ complex sentence structures and some trickier words need to be replaced with equivalent, easier to understand,
+ more frequently used words. There is some work that has been done on text simplification; it is treated as a
+ kind of translation, in this special case within the same language, so the input is text in the same language
+ as the output text, except that the output text is more readable, more understandable. So, that is extremely
+ important.
++ The other thing about summarization is that most approaches tend to rely on extractive summarization, where
+ they just pick certain sentences from the original piece of text so that they don't have to worry about
+ grammatical correctness and proper sentence structure, because they rely on the humans who wrote the text in
+ order to generate the summaries. So I can speak to how summarization needs to be personalized in a certain way,
+ for certain groups, especially for people with visual disabilities. What I have noticed in some of my studies is
+ that, even though they can hear it, they don't necessarily understand it, because the writing is sort of visual;
+ in other words, it needs you to be visually imaginative. So, what is the non-visual alternative for such kind of
+ text? How do you summarize text that includes a lot of visual elements in it? How do you convert it into
+ equivalent non-visual explanations? This necessarily goes beyond extractive summarization. You cannot
+ just pick and choose. You need to replace the wordings in the sentence with other wordings that they can
+ understand. Some of the text these days, especially in news articles and all, doesn't come purely as text. It is
+ sort of multi-modal in the sense that there are pictures, GIFs, everything, and the text sort of refers to these
+ pictures. So, this is another problem, because then it becomes highly visual.
+ So, you have to take some of the visual elements of the picture, probably through computer vision techniques or
+ something, and then inject it into the text in order to make it more self-sufficient and understandable for
+ people who cannot see the images.
+ So, that is my take on it.
+
++ >> CARLOS DUARTE: Yes. That is a very good point about the multimedia information and how do we summarize + everything into text. Yes. That is a great point. Chaohai, your take on this? +
++ >> CHAOHAI DING: Yes. We don't have much experience with text summarization. Most of our research is on AAC and
+ the interlinking and generation of AAC symbols. But we did have a project that involved some text summarization.
+ We constructed a knowledge graph for an e-learning platform and then we needed to extract text summaries
+ from lecture notes to make them easier and accessible for students with disabilities.
+ So, based on that project, what we learned is that text summarization is a very difficult task in NLP, because
+ it is highly dependent on the text, the context and domain, the target audience, and even the goal of the
+ summary.
+ For example, in our scenario, we wanted to have a summary of each lecture's notes, but we had very long
+ transcripts for each lecture. So, we used a few text summarization models to generate the summaries, but the
+ outcome was not good.
+ As Vikas just said, some of the text summarization just picks some of the text and replaces some of the words;
+ that is it, and some of it doesn't make sense. So, that is one problem we identified in text summarization.
+
++ And we also had to think about personalization, because the project is related to adaptive + learning for individual students, so we need personalization for each student. So, text summarization could be + customized and adapted to a user's needs. + This can be improved with the user's personal preferences or feedback, and also by allowing the user to set + their summary goal. The simplification part is also very important, because some students may have cognitive + disabilities, or other types of disabilities, and they need the text simplified into plain language. Yes, I think + that is mainly what we have for text summarization. +
++ >> CARLOS DUARTE: Thank you so much. Let's move on. We started with the challenges and now I would like to + move on to the future perspectives. What are the breakthroughs that you see happening, promoted by the use of NLP + for accessible communication? I will start with you, Vikas. +
++ >> VIKAS ASHOK: So, my perspective is that there are plenty of NLP tools out there already that haven't + been exploited to the fullest extent to address accessibility and usability issues. The growth in NLP techniques + and methods has been extremely steep in recent years. And the rest of us in different fields are trying to + catch up. + Still, there is a lot to be explored as to how they can be used to address real world accessibility problems, + and we are in the process of doing that, I would say. So, text summarization is one thing we discussed already + that can be explored in a lot of scenarios to improve the efficiency of computer interaction for People with + Disabilities. But the main problem, as we discussed not only in this panel, but also on other panels, is the + data. +
++ So, for some languages there is enough of a corpus that the translation is good, because the + translation depends on how much data you have trained on. But for some pairs of languages it may not be that + easy, or even if it does something, it may not be that accurate, so that could be a problem. + Then the biggest area I see, which can be very useful for solving many accessibility problems, is the + improvement in dialogue systems. Natural language dialogue is a very intuitive interface for many users, + including many People with Disabilities. +
++ So, those with physical impairments which prevent them from conveniently using the keyboard or the mouse, and + those that are blind who have to use screen readers, which are known to be time consuming. + So, dialog systems are under explored. People are still exploring them. You can see the commercialization going + on, like with Smartphones and all, but still only for some high-level interactions, like setting alarms, turning on + lights and answering some types of questions. But what about using that to interact with applications, in the + context of an application? + So, if I say, add a user comment to this particular piece of document text, say in Word or Docs, can a + spoken dialog assistant understand that and automate it? That kind of automation, I feel, would address many + of the issues that people face interacting with digital content. So, that is one of the things I would say we + can use NLP for. +
++ The other thing is the increased availability of Large Language Models, pre-trained models, like the one Lourdes + mentioned, GPT, which is essentially a transformer decoder or generator based model. Then there is also BERT, + which is encoder based. So, this helps us in a way that we don't need large amounts of data to solve problems, + because they're already pre-trained on a large amount of data. So, what we need are small datasets that + are more fine-tuned toward the problem we are addressing. + So, in accessibility datasets, there I think there needs to be a little more investment. + They don't have to be that big, because the Large Language Models already take care of most of the language + complexities. It is more like fine tuning to the problem at hand. So, that is where I think some effort should + go, and once we do that, obviously, we can fine tune and solve the problems. And then there is tremendous + advancement in transfer learning techniques, so we can explore that as well, in order to not start from + scratch, instead borrowing things that are already there from similar problems. There is a lot to be + explored, but we haven't done that yet. So, there is plenty of opportunity for research using NLP expertise for + problems in accessible communication, especially. +
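+
+ A minimal sketch of the kind of small-scale fine-tuning Vikas describes, assuming the Hugging Face Transformers
+ and Datasets libraries; the BERT checkpoint, the two-sentence toy dataset and the "needs simplification" labeling
+ task are invented for illustration only, not something the panel used.
+
+ ```python
+ # Toy sketch: fine-tune a small pre-trained model on a tiny, hand-labeled
+ # accessibility dataset. The pre-trained weights do most of the work; only a
+ # small, task-specific dataset is layered on top.
+ from datasets import Dataset
+ from transformers import (AutoModelForSequenceClassification, AutoTokenizer,
+                           Trainer, TrainingArguments)
+
+ # Hypothetical labels: 1 = sentence likely needs plain-language simplification.
+ examples = {
+     "text": ["The aforementioned stipulations shall henceforth apply.",
+              "These rules now apply."],
+     "label": [1, 0],
+ }
+ dataset = Dataset.from_dict(examples)
+
+ tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
+ model = AutoModelForSequenceClassification.from_pretrained(
+     "bert-base-uncased", num_labels=2)
+
+ def tokenize(batch):
+     return tokenizer(batch["text"], truncation=True,
+                      padding="max_length", max_length=64)
+
+ dataset = dataset.map(tokenize, batched=True)
+
+ trainer = Trainer(
+     model=model,
+     args=TrainingArguments(output_dir="out", num_train_epochs=1,
+                            per_device_train_batch_size=2),
+     train_dataset=dataset,
+ )
+ trainer.train()  # a real dataset would have far more than two examples
+ ```
+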
++ >> CARLOS DUARTE: Yes. Definitely some exciting avenues there. So, Chaohai, can we have your take on this? Your + breakthroughs? +
++ >> CHAOHAI DING: Yes, I totally agree with Vikas' opinions. For my research, because I mainly work with AAC + currently, I will take AAC as an example. The future perspective for AAC and NLP for AAC, I think, first of all + would be personalized, adaptive communication for each individual. Because each individual has their own + communication, their own way to communicate with others. + And NLP techniques can be used to make this communication more accessible, more personalized and adapted based + on their personal preferences and feedback. + So, this can be used for personalized AAC symbols. Currently AAC users are just using standard AAC symbol sets + for their daily communication. So, how can we use NLP and generative AI models to create more customized, + personalized AAC symbols, which could have the ability to adapt to an individual's unique cultural and + social needs? + I think that is one potential contribution to AAC users. +
++ The second one will be accessible multi modal communication. NLP techniques have the potential to + enhance accessible communication by improving interoperability in training data between verbal + language, Sign Language and AAC. + So, data interoperability can provide more high-quality training data for these languages and symbol sets. + Additionally, it can provide the ability to translate between different communication modes and make them more accessible + and inclusive. So, in AAC, we can have multiple AAC symbol sets that can be linked, mapped and interlinked by + NLP models, and this can contribute to translation between AAC and AAC, AAC and text, AAC and Sign + Language, and vice versa. That is the second perspective I think about. +
++ And the third one is the AI assisted communication that Vikas just talked about, things like ChatGPT. + These Large Language Models have been trained by the big companies and have been spreading widely on Social + Media. So, how do we use these trained Large Language Models, incorporated with other applications, for + more accessible communication to help People with Disabilities? That is another future we are looking toward. +
++ The last one I am going to talk about is more regarding AAC. AAC is quite expensive. So, affordability is + very important, and it can be achieved with NLP or AI. That is one thing I mentioned: we are currently looking + into how to turn images into symbols, and how to generate AAC symbols automatically by using image generative AI + models, like Stable Diffusion. So, that is another future we are looking forward to, how to reduce the cost of + accessible communication. Thank you. +
++ >> CARLOS DUARTE: Thank you, Chaohai. Definitely a relevant point, reducing the cost of getting data and all of + that. That is important everywhere. So, Lourdes, what are you looking for in the near future? And you are muted. +
++ >> LOURDES MORENO: Sorry. As we mentioned before, there are two trends: the appearance of newer and better + language models than the previous ones, working on these new models, and reducing disability biases. Also, I + will list specific natural language processing tasks and applications that I will work on in the coming years. + One is accessibility in domain specific areas, such as health. Health language is in high demand and much needed, + but patients have problems understanding information about their health condition, diagnosis and treatment, and + natural language processing methods could improve their understanding of health related documents. Similar problems + appear in legal and financial documents, the language of administration, government, and so on. Current natural + language processing technology that simplifies and summarizes these could help in this roadmap. +
++ Another line is speech-to-text. Speech-to-text will be a relevant area of research in the field of virtual + meetings in order to facilitate accessible communication by generating summaries of meetings, as well as minutes + in plain language. +
++ Another topic is the integration of natural language processing methods into the design and development of + multimedia user interfaces. It is necessary to approach accessible communication from a multidisciplinary + perspective, across different areas such as human computer interaction, software engineering and natural language + processing. +
++ Finally, another issue is advancing the application of natural language processing methods in smart assistants + to support People with Disabilities and the elderly, assisting them in their daily tasks and promoting active living. +
++ >> CARLOS DUARTE: Thank you so much, Lourdes, and every one of you for those perspectives. I guess we still have + five minutes more in this session. So, I will risk another question. I will ask you to try to be brief on this + one. + But, the need for data was common across all your interventions. And if we go back to the previous panel, it + was also brought up by all the panelists. So, yes, definitely, we need data. + What are your thoughts on how we can make it easier to collect more data for this specific aspect of accessible + communication? Because we communicate a lot, right? Technology has opened up several channels + where we can communicate even when we are not co-located. + So, every one of us is in a different part of the planet and communicating right now. Technology has + improved that possibility a lot. + However, we always hear that we need more data, we can't get data. So, how do you think we can get more data? + And, of course, we need the data to train these models, but can't we also rely on these models to generate data? + So, let me just drop this on you now. Do any of you want to go first? +
++ >> CHAOHAI DING: I can go first. Yes. We have been working on open data since years ago, I mean in AI and + Data Science, because when I started my PhD we worked on open data and there is an open data initiative in the + UK. We wanted to open our data, government data, and public transport data. That is how long I have been + working on public transportation with accessibility needs. There was a lack of data at the beginning of my + PhD, and a few years later we still lack accessibility information data. + So, how can we, in the accessibility area, have such data to train our models? + What I used to do with public transport data was map available data into a larger dataset. That + incurred a lot of manual work like cleaning, data integration, and all these methods to make the data + available. That is the first approach. +
++ Secondly, we think about how we can contribute to a data repository, something like ImageNet or WordNet, that we + can collaboratively, together, contribute to, to identify data related to accessibility research. I think that is + a way, as a community, we can create such a universal repository or some kind of data initiative that we can use + to work on accessibility research. +
++ Then the third approach is that we can definitely generate data based on small data. We can use generative AI + models to generate more, but the question is, is that data reliable? Is the generated data good enough, or is it + biased? That is my conclusion. Thank you. +
++ >> CARLOS DUARTE: Yes. Thank you. I think the big question mark is whether that synthetic data is reliable or not. Vikas or + Lourdes, do you want to add something? +
++ >> VIKAS ASHOK: Yes. I have used synthetic data before, based on a little bit of real data. And in some cases, + you can generate synthetic data. One of the things I had to do was extract user comments in documents. Most of + these word processing applications allow you to post comments to the right, for your collaborators to look at and + then, you know, address them. So, to automatically extract those, I had to generate synthetic data, because + obviously there were only a few documents with collaborative comments. The appearance there is like, okay, comments + will appear somewhere on the right side, right corner, which will have some text in it with a few sentences, so + there are some characteristics. So, in those cases we were able to generate synthetic data and train the machine + learning model, and it was pretty accurate on real data. So, in some cases you can exploit + the way data will appear, and then generate the synthetic data. But in many cases, it may not be possible. Like + the project I mentioned in Social Media where the text contains a lot of non-standard words. Simply replacing + the non-standard words with synonyms may not do the job, because then you take the fun aspect away from Social + Media, right? It should be as fun and entertaining when you listen to Social Media text as it is when you look + at it. +
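+
+ For the document-comment example Vikas mentions, a rough sketch of what that style of synthetic data generation
+ can look like; the layout constants, comment strings and labels below are made up purely for illustration.
+
+ ```python
+ # Toy synthetic-data generator: exploit the known appearance of document
+ # comments (small text boxes hugging the right margin) to fabricate labeled
+ # training examples when only a few real annotated documents exist.
+ import random
+
+ PAGE_WIDTH, PAGE_HEIGHT = 1000, 1400  # hypothetical page size in pixels
+
+ def synthetic_comment_box():
+     """Return a fake bounding box and text, mimicking where comments appear."""
+     width = random.randint(150, 250)
+     height = random.randint(40, 120)
+     x = PAGE_WIDTH - width - random.randint(5, 30)   # near the right edge
+     y = random.randint(50, PAGE_HEIGHT - height - 50)
+     text = random.choice(["Please rephrase this.", "Add a citation here.",
+                           "Great point!"])
+     return {"bbox": (x, y, width, height), "text": text, "label": "comment"}
+
+ training_examples = [synthetic_comment_box() for _ in range(1000)]
+ print(training_examples[0])
+ ```
+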
++ So, you have to do some kind of clever replacement. So for that you need some kind of human expert going there + and doing that. Crowdsourcing, I think, is one way to get data quickly. It is pretty reliable. I have seen in + the NLP community, like NLP papers that appears in ACL and they rely heavily on the Amazon Mechanical Turk and + other online incentivized data collection mechanisms. So, that I think is one thing. +
++ The other thing I do, you know, in my classes, especially, is get the students to help each other out to collect + the data. It doesn't have to be that intensive. If even one student collects, like, ten data points a day, + over the semester there can be enough data for a lot of things. So, you know, across each of their + projects, at the end of the course, pretty much they will have a lot of data for research. So, everybody can + contribute in a way. Students, especially, are much more reliable because they are familiar with the mechanisms, + how to label, collect data and all that stuff. They can understand how things work, as well. So, it is like a + win-win. +
++ >> CARLOS DUARTE: Yes. Thank you for that contribution. Good suggestion. And Lourdes, we are really running out + of time, but if you still want to intervene, I can give you a couple of minutes. +
++ >> LOURDES MORENO: Okay. I think that we do need more data, but my vision is also somewhat negative, + because obtaining these datasets is expensive. And in accessible communication, I work on simplification, and this data + must be prepared by experts in Accessibility. It is important that this data is validated by people with + accessibility needs, and uses plain language resources. And then it is a problem to obtain data with quality. +
++ >> CARLOS DUARTE: Okay. Thank you so much, Lourdes. And thanks, a very big thank you to the three of you, + Chaohai, Vikas and Lourdes. It was a really interesting panel. Thank you so much for your availability. +
++ >> CARLOS DUARTE: Okay. Since we should have already started the closing keynote, I am going to move on + to introducing Shari Trewin. + She is an Engineering Manager at Google, leading a team that develops Assistive Technologies. So, I am really + looking forward to your vision of what is next, what the future holds for us in Assistive AI. So, as we had + yesterday, at the end of the keynote Jutta will join us and we will have an even more interesting conversation + between Shari and Jutta, making it really appetizing for the keynote. + So, Shari, the floor is yours. +
++ >> SHARI TREWIN: Okay, thank you very much. Can you hear me okay? +
++ >> CARLOS DUARTE: Yes. +
++ >> SHARI TREWIN: Okay. What a pleasure it is to participate in the symposium and hear from our opening keynote, + Jutta, and all our panelists over the last two days. Thank you so much for inviting me. It's my privilege to + finish this up now. Yesterday Jutta grounded us all in the need to do no harm and talked about some of the ways + we can think about detecting and avoiding harm. + Today I will focus on digital accessibility applications of AI in general and ask what is next for Assistive AI. +
++ So, my name is Shari Trewin. I am an Engineering Manager in the Google Accessibility team. I'm also the past + chair of the ACM's SIGACCESS, the Special Interest Group on Accessible Computing. My background is computer science + and AI. I have been thinking about the ways that AI plays into accessibility for many years. Much of my work and + thinking on AI and AI fairness was done when I worked at IBM as a Program Director for IBM Accessibility. A + shout out to any IBM friends in the audience. + At Google, my team focuses on developing new assistive capabilities and, as we have been discussing for the last + few days, AI has an important role to play. +
++ There has been a lot of buzz in the news lately, both exciting and alarming, about generative AI, especially + these Large Language Models. For example, the ChatGPT model from OpenAI has been in the news quite a bit. In + case you haven't played with it yet, here is an example. I asked ChatGPT how will AI enhance Digital + Accessibility. Let's try to get it to write my talk for me. It responded with a positive viewpoint. It said AI + has the potential to significantly improve Digital Accessibility for People with Disabilities. Here are a few + ways that AI can contribute to this goal. + It went on to list four examples of transformative AI. All of these have been major topics at this symposium. + For each one it gave a one or two sentence explanation of what it was, and who it is helpful for. +
++ Finally, it concluded that AI has the potential to make digital content and devices more accessible to People + with Disabilities, allowing them to fully participate in the digital world. It seems pretty convincing and well + written. + Perhaps I should just end here and let AI have the last part. But, you know, it is kind of mind blowing, + although it was pretty terrible at jokes. + But for all it can do, without explicitly being connected to any source of truth it does sometimes get things + flat out wrong, with the risk of bias in the training data being reflected in the predictions. +
++ This limits the ways we can apply this technology today, but it also gives us a glimpse into the future. I am + not going to take medical advice from a generative AI model yet, but as we get better at connecting this level + of language fluency with knowledge, improving the accuracy, detecting and removing bias, this opens up so many + new possibilities for interaction models, and ways to find and consume information in the future. + So, I will come back to that later. + +
++ For today's talk, I am going to slice the topic a little bit differently. I want to focus on some of the general + research directions that I see as being important, moving Digital Accessibility forward with AI. + In our opening keynote, Jutta laid out some of the risks that can be associated with AI if it is not created and + applied with equity and safety in mind. It is important to keep these considerations in mind as we move forward + with AI. When the benefits of AI do outweigh the risks in enabling digital access, we still have a way to go in + making these benefits available to everyone, in fact, to make them accessible. + So, I will start by talking about some current efforts in that direction, making Assistive AI itself more inclusive. + The second topic I want to cover is where we choose to apply AI, focusing on what I call AI at source. And + finally, Web Accessibility work emphasizes the need to shift left, that is, to bake accessibility in as + early as possible in the development of the digital experience. So, I will discuss some of the places where AI + can help with that shift left, and highlight both opportunities and important emerging challenges that we have + for Web Accessibility. +
++ So, we know that AI has already changed the landscape of assistive technology. So, one research direction is how + do we make these AI models more inclusive? And I want to start with a little story about captions. In 2020, I + was accessibility chair for a very large virtual conference. We provided a human captioner, who was live + transcribing the sessions in a separate live feed. I am showing an image of a slide from a presentation here + with a transcription window to the right. I spoke with a Hard of Hearing attendee during the conference who used + captions to supplement what he could hear. He told me, well, the live feed had quite a delay, so he was also + using automated captions that were being streamed through the conference provider, let's add them to this view, + highlighted in green. This had a little less delay but had accuracy problems, especially for foreign speakers or + people with atypical speech. And especially for people's names or technical terms. You know, the important + parts. + So, he also turned on the automated captions in his browser, which used a different speech recognition engine. I added + those on the screen, too. And he supplemented that with an app on his phone, using a third, different speech + recognition engine, capturing the audio as it was played from his computer and transcribing it. So that is four + sources of captions to read. + None of them was perfect, but he combined them to triangulate interpretations where the transcriptions seemed to + be wrong. So, we could say AI powered captions were helping him to access the conference, no doubt about it, but + it wasn't a very usable experience. He was empowered but he also had a huge burden in managing his own + accessibility, and there were still gaps. +
++ As Michael Cooper pointed out yesterday, imperfect captions and descriptions can provide agency, but can also + mislead users and waste their time. + I also want to point out this particular user was in a really privileged position, because he knows about all + these services, he has devices powerful enough to stream all these channels. He has good internet access. He has + a Smartphone. He has the cognitive ability to make sense of this incredible information overload. This really + isn't equitable access, right? And the captions themselves were not providing an accurate representation of the + conference speakers, so those with atypical speech were at a disadvantage in having their message communicated + clearly, so there is an important gap to be filled. + One of the current limitations of automated captions is poor transcriptions of people with atypical speech, + especially when they are using technical or specialized language. For example, Dimitri Kanevsky is a Google + researcher and inventor, an expert in optimization and algebraic geometry, among many other topics. He is + Russian and deaf, both of which affect his English speech. I will play a short video clip of Dimitri. +
++ (Pre Captioned Video) +
++ So, Dimitri said, Google has very good general speech recognition, but if you do not sound like most people, it + will not understand you. On the screen a speech engine translated that last part of his sentence as “but if you + look at most of people, it will look and defended you”. So, People with Disabilities that impact speech such as + Cerebral Palsy, stroke, Down Syndrome, Parkinson's, ALS, are also impacted by lack of access to speech + recognition, whether it is for controlling a digital assistant, communicating with others or creating accessible + digital content. + I want to go to the next slide. + +
++ So, Google's Project Euphonia has set out to explore whether personalized speech recognition models can provide + accurate speech recognition for people with atypical speech, like Dimitri. And this is a great example of the + way research can move the state of the art forward. The first challenge, as many people have mentioned today, is + the lack of suitable speech data. Project Euphonia collected over a million utterances from people with speech + impairments, and the researchers built individual models for 432 people and compared them to state of the art + general models. They found the personalized models could significantly reduce the word error rates, so the + error rates went from something like 31% with the general models down to 4.6%. + So, it is not just a significant improvement, but it is enough of an improvement to get to a high enough point + to make the technology practical and useful. In fact, they found these personalized models could sometimes + perform better than human transcribers for people with more severely disordered speech. Here is an example of + Dimitri using his personal speech recognition model. +
++ (Captions on Smartphone demonstration in video) +
++ So, the transcription this time is make all voice interactive devices be able to understand any person speak to + them. It is not perfect but it is much more useful. Project Euphonia started in English but it is now expanding + to include Hindi, French, Spanish and Japanese. + So, that project demonstrated how much better speech recognition technology could be, but the original data + wasn't shareable outside of Google and that limited the benefits of all that data gathering effort. + +
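+
+ As a side note on the numbers quoted above, word error rate is the usual yardstick for these results; a minimal
+ sketch of how it is computed, using the imperfect transcription shown earlier as the hypothesis (this bare-bones
+ function is only illustrative, not the evaluation code used in the project).
+
+ ```python
+ # Word error rate: word-level edit distance between a reference transcript and
+ # the recognizer's hypothesis, divided by the number of reference words.
+ def wer(reference: str, hypothesis: str) -> float:
+     r, h = reference.split(), hypothesis.split()
+     d = [[0] * (len(h) + 1) for _ in range(len(r) + 1)]
+     for i in range(len(r) + 1):
+         d[i][0] = i
+     for j in range(len(h) + 1):
+         d[0][j] = j
+     for i in range(1, len(r) + 1):
+         for j in range(1, len(h) + 1):
+             cost = 0 if r[i - 1] == h[j - 1] else 1
+             d[i][j] = min(d[i - 1][j] + 1,         # deletion
+                           d[i][j - 1] + 1,         # insertion
+                           d[i - 1][j - 1] + cost)  # substitution
+     return d[len(r)][len(h)] / len(r)
+
+ print(wer("it will not understand you",
+           "it will look and defended you"))  # 0.6, i.e. 60% word error rate
+ ```
+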
++ So, the Speech Accessibility Project at the University of Illinois is an example of what we might do about that + problem. It is an initiative to make a dataset for broader research purposes. + It was launched in 2022, and it is a coalition of technologists, academic researchers and community + organizations. The goal is to collect a diverse speech dataset for training speech recognition models to do + better at recognizing atypical speech. It is building on some of the lessons learned in Project Euphonia, paying + attention to ethical data collection, so individuals are paid for participating and their samples are de-identified + to protect privacy. The dataset is private; it is managed by UIUC and made available for research purposes, + and this effort is backed by very broad cross-industry support from Amazon, Apple, Google, Meta, and Microsoft. + It's going to enable both academic researchers and partners to make progress. Although the current work is focused + on speech data, this is in general a model that could be used for other data that's needed to make models more + inclusive. We could think of touch data. There are already significant efforts going on to gather Sign Language + video data for Sign Language translation. +
++ And Project Relate is an example of the kind of app that can be developed with this kind of data. It is an + Android app that provides individuals with the ability to build their own personalized speech models and use + them for text to speech, for communication and for communicating with home assistants. +
++ Personalized speech models look really promising, and potentially a similar approach could be taken to build + personalized models for other things like gesture recognition, touchscreen interactions, interpreting inaccurate + typing. I think there is a world of opportunity there that we haven't really begun to explore. + So, now that we know we can build effective personal models from just a few hundred utterances, can we learn + from this how to build more inclusive general models? That would be a very important goal. +
++ Can we improve the performance even further by drawing on a person's frequently used vocabulary? Can we prime + models with vocabulary from the current context? And as Shivam Singh mentioned yesterday, we're beginning to be + able to combine text, image, and audio sources to provide a richer context for AI to use. So, there's very fast + progress happening in all of these areas. Just another example, the best student paper at the ASSETS 2022 + conference was using vocabularies that were generated automatically from photographs to prime the word + prediction component of a communication system for more efficient conversation around those photographs. +
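+
+ A rough sketch of the "priming" idea behind that paper: boosting word-prediction candidates that also appear in a
+ vocabulary extracted from the current photograph. The scores, vocabulary and boost factor are invented for
+ illustration, not taken from the ASSETS 2022 system.
+
+ ```python
+ # Re-rank a word predictor's candidates using context vocabulary, for example
+ # object labels produced by an image tagger for the photo being discussed.
+ def rerank(predictions, context_vocab, boost=4.0):
+     """predictions: dict mapping candidate word -> base score."""
+     scored = {w: s * boost if w in context_vocab else s
+               for w, s in predictions.items()}
+     return sorted(scored, key=scored.get, reverse=True)
+
+ base = {"day": 0.30, "meeting": 0.25, "dog": 0.10, "beach": 0.05}
+ photo_vocab = {"dog", "beach", "ball"}  # hypothetical tags from the photo
+ print(rerank(base, photo_vocab))        # "dog" now outranks "day"
+ ```
+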
++ Finally, bring your own model. I really agree with Shaomei Wu when she said yesterday use cases of media + creation are under investigated. We can apply personalized models in content creation. Think about plugging in + your personal speech model to contribute captions for your live streamed audio for this meeting. The potential + is huge, and web standards might need to evolve to support some of these kind of use cases. +
++ When we talk about assistive AI, we're often talking about other technologies that are being applied at the + point of consumption, helping an individual to overcome accessibility barriers in digital content or in the + world. I want to focus this section on AI at source and why that is so important. Powerful AI tools in the hands + of users don't mean that authors can forget about accessibility. We have been talking about many examples of + this through this symposium, but here are a few that appeal to me. +
++ So, I am showing a figure from a paper. The figure is captioned user response time by authentication condition. + And the figure itself is a boxplot that shows response times from an experiment for six different experimental + conditions. + So, it is a pretty complex figure. And if I am going to publish this in my paper, and make my paper accessible, I need + to provide a description of this image. There is so much information in there. When faced with this task, about + 50% of academic authors resort to simply repeating the caption of the figure. And this is really no help at all + to a blind scholar. + They can already read the caption. That is in text. So, usually the caption is saying what information you will + find in the figure, but it is not giving you the actual information that is in the figure. +
++ Now, as we discussed in yesterday's panel, the blind scholar reading my paper could use AI to get a description + of the figure, but the AI doesn't really have the context to generate a good description. Only the author knows + what is important to convey. + At the same time, most authors aren't familiar with the guidelines for describing images like this. And writing + a description can seem like a chore. That is why I really love the idea that Amy Pavel shared yesterday for ways + that AI tools could help content creators with their own description task, perhaps by generating an overall + structure or initial attempt that a person can edit. + +
++ There are existing guidelines for describing different kinds of charts. Why not teach AI how to identify + different kinds of charts and sort of generate a beginning description? + And Shivam Singh was talking yesterday as well about recent progress in this area. + Ideally the AI could refine its text in an interactive dialogue with the author, and the resulting description + would be provided in the paper and anyone could access it, whether or not they had their own AI. So, that is + what I mean by applying AI at source. Where there is a person with the context to make sure the description is + appropriate, that can provide a better description. + Of course, it can only provide one description. There is also an important role for image understanding that can + support personalized exploration of images, so that a reader could query information that wasn't + available in a short description, like what were the maximum and minimum response times for the gesture + condition in this experiment. + I am not saying that AI at source is the only solution, but it is important, and perhaps an underdeveloped piece. +
++ Here is a second example. I love examples! As we were just talking about in the earlier panel, text + transformations can make written content more accessible. So, for example, using literal language is preferable + for cognitive accessibility. + So, an idiom like "she was in for a penny, in for a pound," can be hard to spot if you are not familiar with + that particular idiom and can be very confusing if you try to interpret it literally. Content authors may use + this kind of language without realizing. Language models could transform text to improve accessibility in many + ways, and one is by replacing idioms with more literal phrasing. + So, I asked the language model to rephrase this sentence without the idiom and it came up with a sensible, + although complex literal replacement. "she decided to fully commit to the situation, no matter the cost." Again, + this can be applied as a user tool, and as a tool for authors to help them identify where their writing could be + misinterpreted. + So, one puts the onus on the consumer to bring their own solution, apply it and be alert for potential mistakes. + The other fixes the potential access problems at source, where the author can verify accuracy. + +
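+
+ A hedged illustration of that idiom-rewriting idea, using one small, publicly available instruction-tuned model
+ through the Hugging Face pipeline API; the checkpoint name and prompt are placeholders, not the system Shari
+ describes, and a larger model would likely produce better rewrites.
+
+ ```python
+ # Ask a small text-to-text model to replace an idiom with literal wording.
+ from transformers import pipeline
+
+ rewriter = pipeline("text2text-generation", model="google/flan-t5-base")
+
+ sentence = "She was in for a penny, in for a pound."
+ prompt = ("Rewrite the sentence without the idiom, using plain, "
+           f"literal language: {sentence}")
+
+ print(rewriter(prompt, max_new_tokens=40)[0]["generated_text"])
+ # An author (or an authoring tool) would still need to verify the suggestion.
+ ```
+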
++ As I mentioned earlier, because today's Large Language Models are not connected to a ground truth, and they do + have a tendency to hallucinate, applying them at source is one way to reach the benefit much more quickly + without risking harm to vulnerable users. + Once we can connect language models to facts, or connect speech to the domain of discourse, we + will really see a huge leap in performance, reliability and trustworthiness. + So, in the previous two examples, AI could be applied at source. What about when the AI has to be on the + consumer side, like when using text to speech to read out text on the web? +
++ On the screen here is the start of the Google information sidebar about Edinburgh, the capital city of + Scotland. There is a heading, subheading and main paragraph. Text to speech is making huge advances, with more + and more natural sounding voices becoming available and the capability of more expressive speech, which itself + makes comprehension easier. And expressiveness can include things like adjusting the volume and verbosity. When + reading a heading, maybe I would naturally read it a little louder and pause afterwards. For a TTS service to do + the best job reading out text on the web, it helps to have the semantics explicitly expressed. + For example, the use of heading markup on Edinburgh in this passage. + It is also important that domain specific terms and people's names or place names are pronounced correctly. + Many people not from the UK would, on first sight, pronounce Edinburgh as "Edin-burg". Web standards, if they're applied + properly, can mark up the semantics like headings and pronunciation of specialized or unusual words, helping the + downstream AI to perform better. AI can also be used to identify the intended structure and compare it against the + markup, or identify unusual words or acronyms where pronunciation information could be helpful. + And then the passage can be read appropriately by your preferred text to speech voice, at your preferred speed + and pitch. +
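+
+ A small sketch of why explicit semantics help a downstream reader: with real heading markup, a text-to-speech
+ pipeline can choose to announce headings differently. The HTML snippet and the printed "style" labels are
+ stand-ins for a real TTS voice and are only illustrative.
+
+ ```python
+ # Walk simple HTML and mark heading text for louder, slower speech with a
+ # pause, leaving body text at normal settings.
+ from html.parser import HTMLParser
+
+ class SimpleReader(HTMLParser):
+     HEADINGS = {"h1", "h2", "h3", "h4", "h5", "h6"}
+
+     def __init__(self):
+         super().__init__()
+         self.in_heading = False
+
+     def handle_starttag(self, tag, attrs):
+         if tag in self.HEADINGS:
+             self.in_heading = True
+
+     def handle_endtag(self, tag):
+         if tag in self.HEADINGS:
+             self.in_heading = False
+
+     def handle_data(self, data):
+         text = data.strip()
+         if text:
+             style = "louder, slower, pause after" if self.in_heading else "normal"
+             print(f"[{style}] {text}")  # a real system would drive a TTS voice
+
+ SimpleReader().feed("<h2>Edinburgh</h2><p>Capital city of Scotland.</p>")
+ ```
+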
++ It can also be used by a speech to text model to marry the vocabulary on the page with what you are saying as + you are interacting with the page, to use voice controls. + So, I am showing you this example to illustrate that Web Accessibility standards work together with Assistive AI + techniques to enable the best outcome. And many uses of Assistive Technology can benefit from this information. + So, thinking about applying AI at source, there is an important role here for AI that makes sure that the visual + and structural DOM representations are aligned. + So, I want to reiterate the powerful benefits of applying AI at authoring time that these examples illustrate. +
++ So, first off, we are removing the burden from People with Disabilities to supply their own tools to bridge + gaps. Secondly, it benefits more people, including those people who don't have access to the AI tools. People + with low end devices, poor internet connectivity, less technology literacy. + Thirdly, a content creator can verify the accuracy and safety of suggestions, mitigating harms from bias or + errors, because they have the context. + And AI can also potentially mitigate harms in other ways. For example, flagging videos, images or animations + that might trigger adverse health consequences for some people, like flashing lights. + + +
++ So, AI inside is likely to reach more people than AI provided by end users. I think this is how we get the most + benefit for the least harm. + It is also a huge opportunity to make accessibility easier to achieve. AI can make it much quicker and easier to + generate the accessibility information, like captions or image descriptions, as we discussed. + And lowering the barrier to entry with assistive tools is one way to encourage good accessibility practice. + AI can proactively identify where accessibility work is needed, and evaluate designs before even a line of code + has been written. +
++ But perhaps the biggest opportunity and the greatest need for our attention is the use of AI to generate code, + which brings us to the final section of this talk. +
++ So, in the previous section we talked about ways that AI can be applied in content creation to help build + accessibility in. But AI itself is also impacting the way websites are designed and developed, independent of + accessibility. So, in this section, let's think about how this change will impact our ability to bake + accessibility in, and can we use AI to help us? +
++ As accessibility advocates, we have long been pushing the need to shift left. By that, we mean paying attention + to accessibility right from the start of a project, when you are understanding the market potential, when you + are gathering the requirements, when you are understanding and evaluating risks, developing designs, and + developing the code that implements those designs. + In a reactive approach to accessibility, which is too often what happens, the first attention to accessibility + comes when automated tools are run on an already implemented system. Even then they don't find all issues, and + may not even find the most significant ones, which can lead teams to prioritize poorly. So, with that reactive + approach, teams can be kind of overwhelmed with hundreds or even thousands of issues, late in their + process, and have difficulty tackling them, and it makes accessibility seem much harder than it could be. +
++ In this morning's panel, we discussed ways AI can be used in testing to help find accessibility problems. AI is + also already being used earlier in the process by designers and developers. In development, for example, GitHub + Copilot is an AI model that makes code completion predictions. GitHub claims that in files where it is turned on, + nearly 40% of code is being written by GitHub Copilot in popular coding languages. There are also systems that + generate code from design wireframes or from high resolution mockups, or even from text prompts. So, it is + incumbent on us to ask: what data are those systems trained on? In the case of Copilot, it is trained on GitHub + open source project code. + So, what is the probability that this existing code is accessible? We know that we still have a lot of work to + do to make Digital Accessibility the norm on the web. Today it is the exception. And many of you probably know + WebAIM does an annual survey of the top million website Home Pages. It runs an automated tool and reports the + issues that it found. Almost 97% of their million pages had accessibility issues. And that is only the + automatically detectable ones. They found an average of 50 issues per page, and they also found that page + complexity is growing significantly. Over 80% of the pages they looked at had low contrast text issues. + More than half had alternative text missing for images. Almost half had missing form labels. So, even though + these issues are easy to find with the automated tools we have today, they are still not being + addressed. These are very basic accessibility issues and they are everywhere. So we know what this means + for AI models learning from today's web. +
++ Here is an example of how this might be playing out already. So, code snippets are one of the most common + things that developers search for. A Large Language Model can come up with pretty decent code snippets, and it is + a game changer for developers and is already happening. Let's say a developer is new to Flutter, + Google's open source mobile app development platform. They want to create a button labeled with an icon, known as + an icon button. + On the slide is the code that ChatGPT produced when asked for Flutter code for an icon button. Along with the + code snippet, it also provided some explanation and it even links to the documentation page, so it is pretty + useful. The code it gave for an icon button includes a reference to what icon to use, and a function to execute + when the button is pressed. + There is really just one important difference between the example generated by ChatGPT and the example given in + the Flutter documentation. ChatGPT didn't include a tooltip, which means there is no text label associated + with this button. That is an accessibility problem. Let's give it credit, ChatGPT did mention that it is + possible to add a tooltip, but developers look first at the code example. If it is not in the example, it is + easily missed. But in the training data here, it seems the tooltip was not present enough of the time for it to + surface as an essential component of an icon button. +
++ So, there is a lot of example code available online, but how much of that code demonstrates accessible coding + practices? Given the state of Web Accessibility, it is likely the answer is not much. + So, our AI models are not going to learn to generate accessible code. It is really just like the societal bias + of the past being entrenched in the training sets of today. The past lack of accessibility could be propagated into + the future. So, here we have an opportunity, and a potential risk. + AI can help to write accessible code, but it needs to be trained on accessible code, or augmented with tools + that can correct accessibility issues. And I think it is important to point out, as well, that I deliberately used + an example in a framework, rather than an HTML example, because that is what developers are writing these + days. + They are not writing raw HTML. They are writing in frameworks, and there are many, many different frameworks, each + with their own levels of accessibility, and ways to incorporate accessibility. +
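+
+ A toy sketch of the kind of check that could be bolted onto a code generator or editor, flagging Flutter
+ IconButton snippets that carry no text label; the regular expressions are simplistic and purely illustrative, and
+ a real tool would parse the framework code properly.
+
+ ```python
+ # Flag generated IconButton code that has neither a tooltip nor a Semantics
+ # wrapper, i.e. no accessible name for the button.
+ import re
+
+ def missing_icon_button_label(snippet: str) -> bool:
+     has_icon_button = re.search(r"\bIconButton\s*\(", snippet)
+     has_label = re.search(r"\btooltip\s*:|\bSemantics\s*\(", snippet)
+     return bool(has_icon_button) and not has_label
+
+ generated = """
+ IconButton(
+   icon: Icon(Icons.volume_up),
+   onPressed: () {},
+ )
+ """
+ if missing_icon_button_label(generated):
+     print("Accessibility issue: IconButton has no tooltip or semantic label.")
+ ```
+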
++ So, one thing is that the theme of this morning, about data being really essential, comes up here again. Do we + have training data to train a code prediction model, perhaps with transfer learning, to generate more accessible + code? Do we have test sets, even, with which we can test code generation for its ability to produce accessible + code? + So, when we are developing datasets for either training or testing, we have to think in terms of the diversity of + frameworks and methods that developers are actually working with, if we want to catch those issues at the point + of creation. + Again, where AI is generating code for a whole user interface based on a visual design, we need to be thinking + about what semantics that design tool should capture to support the generation of code with the right structure, + the right roles for each area, kind of the basic fundamentals of accessibility. +
++ So, um, a final call to action for the community here is to think about, what do we need to do here? Whether it + is advocacy, awareness raising, research, data gathering, standards, or refining models to write accessible + code. This technology is still really young. It has a lot of room for improvement. This is a perfect time for us + to define how accessibility should be built in, and to experiment with different ways. + And, you know, in my opinion, this, perhaps more than anything, is the trend we need to get in front of as an + accessibility community, before the poorer practices of the past are entrenched in the automated code generators + of the future. AI is already shifting left, we must make sure accessibility goes with it. +
++ So, to summarize, we can broaden access to Assistive AI through personalization. To get the benefits of AI based + empowerment to all users, we should make sure that AI integration with authoring tools and processes is applied + where it can, to make it easier to meet accessibility standards and improve the overall standard. Born + accessible is still our goal and AI can help us get there if we steer it right. As a community we have a lot of + work to do, but I am really excited about the potential here. +
++ So, thank you all for listening. Thanks to my Google colleagues and IBM Accessibility team, also, for the + feedback and ideas and great conversations. + Now I want to invite Jutta to join. Let's have a conversation. +
++ >> JUTTA TREVIRANUS: Thank you, Shari. I really, really appreciate your coverage of authoring and the prevention of barriers and the emphasis on timely proactive measures. There may be an opportunity actually to re-look at authoring environments, et cetera, within W3C. +
++ >> SHARI TREWIN: Yes, just to respond to that really quickly. I do wonder, like, should we be focusing on evaluating frameworks more than evaluating individual pages? You know? I think we would get more bang for our buck if that was where we paid attention. +
++ >> JUTTA TREVIRANUS: Yes. Exactly. The opportunity to, and especially as these tools are now also assisting authors, which was part of what the authoring standards were looking at prompting, providing the necessary supports, and making it possible for individuals with disabilities to also become authors of code and to produce code. + So, the greater participation of the community, I think, will create some of that culture shift. So, thank you very much for covering this. + +
++ So, in terms of the questions that we were going to talk about, you had suggested that we might start with one of the thorny questions asked yesterday that we didn't get time to respond to. So, the question was: Do you think that AI and big companies such as Google and Meta driving research in AI can be problematic with respect to social, societal issues, which don't necessarily garner the highest revenue? And, if so, how do you think we can approach this? +
++ >> SHARI TREWIN: Yes. Thank you, Jutta, and thank you to the person who asked that question, too. You know, it is true that company goals and society can pull in different directions. I do think there are benefits to having big companies working on these core models, because they often have better access to very large datasets that can, you know, bring breakthroughs that others can share in, that can help raise the tide that lifts all boats. But advocacy and policy definitely have an important role to play in guiding the application of AI and the direction of AI research, the way it is applied. + Also, I wanted to say one approach here could be through initiatives like the Speech Accessibility Project that I talked about. So, that is an example of big tech working together with advocacy groups and academia to create data that can be applied to many different research projects, and that is a model that we can try to replicate. +
++ >> JUTTA TREVIRANUS: Do you think, you talked quite a bit about the opportunity for personalization. Of course, one of the biggest issues here is that large companies are looking for the largest population, the largest profit, which means the largest customer base, which tends to push them toward not thinking about minorities, diversity, etc. But the training models and the personalization strategies that you have talked about are things that are emerging possibilities within large language models. We have the opportunity to take what has already been done generally, and apply more personalized, smaller datasets, etc. Do you think there is a role for the large companies to prepare the ground, and then for the remaining issues to piggy back on that with the new training sets? Or, do you think even there we are going to have both cost and availability issues? +
++ >> SHARI TREWIN: Well, yeah. I think that the model that you described is already happening in places, like with the Speech Accessibility Project. The ultimate goal would be to have one model that can handle more diverse datasets. And it takes a concerted effort to gather that data. But if the community gathered the data and it was possible to contribute that data, then that is another way that we can influence the larger models that depend on large data. + But personalization, I think, will be very important for tackling some of that tail-end. So, personalization is not just an accessibility benefit. There are a lot of tail populations, small-N populations, that add up to a large N for a lot of people. + I think that the big companies benefit greatly by exploring these smaller populations and learning how to adapt models to different populations, and then, as I mentioned, the ultimate goal would be to learn how to pull that back into a larger model without it being lost in the process. +
++ >> JUTTA TREVIRANUS: Yes. We have the dilemma that the further you are from the larger model, the more you actually need to work to shift it in your direction. So, that is something I think that will need to be addressed whatever personalization happens. The people that need the personalization the most will have the greatest difficulty with the personalization. + Do you think there are any strategies that might be available for us to use to address that particular dilemma? +
++ >> SHARI TREWIN: Yeah. Yes. You are touching my heart with that question, because, really, that has been an ongoing problem in accessibility forever. Not just in the context of AI, but people who would benefit the most from personalization may be in a position that makes it hard to discover and activate even personalizations that are already available. + So, one approach I think that works in some contexts is dynamic adaptation, where, instead of a person needing to adapt to a system, the system can effectively adapt to the person using it. I think that works in situations where the person doesn't need to behave any differently to take advantage of that adaptation. + It doesn't work so well where there is maybe a specific input method that you might want to use that would be beneficial, where you need to do something different. + So, for language models, maybe we can imagine an uber language model that first recognizes, oh, this person's speech is closest to this sub-model that I have learned. + And I am going to use that model for this person, and you can think of that in terms of... +
++ >> JUTTA TREVIRANUS: Increasing the distance, yeah. +
++ >> SHARI TREWIN: Yeah. So, that is one idea. What do you think? +
++ >> JUTTA TREVIRANUS: Yes. I am wondering if there is an opportunity, or if there will ever be an opportunity taken, to re-think just how we design, what design decisions we make, how we develop and bring the systems to market, such that there is the opportunity for greater democratization of access to the tools, and that we don't begin with the notion of, let's design first for the majority, and then think about, I mean, this is an inflection point. + There is an opportunity for small datasets, zero-shot training, transfer learning, et cetera. Is this a time where we can have a strategic push to say, let's think about other ways of actually developing these tools and releasing these tools? Maybe that is a little too idealistic, I don't know what your thinking is there? +
++ >> SHARI TREWIN: Yes. I think especially if you are in a domain where you have identified that there is, you know, real risk and strong risk of bias, it should be part of the design process to include people who would be outliers, people who are going to test the boundaries of what your solution can do, people that are going to help you understand the problems that it might introduce. + So, it is what should happen, I think, in design, in any system. But especially if you are baking in AI, you need to think about the risks that you might be introducing, and you can't really think about that without having the right people involved. + +
++ Somebody yesterday, I think, mentioned something about teaching designers and developers more about accessibility and I think that is a really important point, too. That building diverse teams is really important. Getting more diversity into computer science is really important. But teaching the people who are already there, building things, is also important. + I don't meet very many people who say, oh, I don't care about accessibility. It is not important. It is more that it is still too difficult to do. And that is one place when I think AI can really, really help in some of the tools that people have talked about today. The examples of, where, if we can make it easy enough and lower that barrier, and take the opportunity of these creation points to teach people, as well, about accessibility. So, not always to fix everything for them, but to fix things with them so that they can learn going forward and grow. I think that is a really exciting area. + +
++ >> JUTTA TREVIRANUS: And a great way to support born accessible, accessible by default with respect to what is the tools used to create it. + You contributed some questions that you would love to discuss. And one of the first ones is: Is AI's role mostly considered as improving Assistive Technology and Digital Accessibility in general? Of course, this gets to the idea of not creating a segregated set of innovations that specifically address People with Disabilities, but also making sure that the innovations that are brought about by addressing the needs of people whose needs, well, who face barriers, can benefit the population at large. + So, what do you think? What is the future direction? + +
++ >> SHARI TREWIN: Yeah. This was a question that came from an attendee that they put into the registration process. I do think it is really important to view AI as a tool for Digital Accessibility in general, and not to just think about the end user applications. Although those personal AI technologies are really important, and they are life changing, and they can do things that aren't achievable in any other way. + But AI is already a part of the development process, and accessibility needs to be part of that, and we have so many challenges to solve there. I think it is an area that we need to pay more attention to. So, not just applying AI to detect accessibility problems, but engaging those mainstream development tools to make sure that accessibility is considered. + +
++ >> JUTTA TREVIRANUS: One sort of associated piece to this that came to mind, and I am going to take the privilege of being the person asking the questions, I mean, the focus of most of AI innovation has been on replicating and potentially replacing human intelligence, as opposed to augmenting, or thinking about other forms of intelligence. + I wonder whether the, I mean, our experiences in Assistive Technology, and how technology can become an accompaniment or an augmentation, rather than a replacement, might have some insights to give in this improvement of digital inclusion? + +
++ >> SHARI TREWIN: Yeah. I think you are absolutely right. It is human AI cooperation and collaboration that is going to get us the best results. The language models that we have, the promise that they have, to be more interactive, dialogue like interactions, are heading in a direction that are going to support much more natural human AI dialogue. And accessibility is such a complex topic, where it is not always obvious what I am trying to convey with this image. How important is this thing. + It is not necessarily easy to decide what exactly is the correct alternatives for something, or there is plenty of other examples where the combination of an AI that has been trained on some of the general principles of good accessibility practice, and a person who may not be as familiar, but really understands the domain and the context of this particular application, it is when you put those two things together that things are going to start to work, so the AI can support the person, not replace the person. + +
++ >> JUTTA TREVIRANUS: And, of course, the one issue that we need to, thorny issue, that we need to overcome with respect to AI is the challenge of addressing more qualitative, non-quantitative values and ideas, etc. So, it will be interesting to see what happens there. +
++ >> SHARI TREWIN: Yes. Yes. Yeliz had a very good suggestion this morning, perhaps we should pay attention to how people are making these judgments. How do accessibility experts make these judgments? What are the principles and can we articulate those better than we do now, and communicate those better to designers. +
++ >> JUTTA TREVIRANUS: Right. This notion of thick data, which includes the context. Because frequently we isolate the data from the actual context. And many of these things are very contextually bound, so, do you see that there might be a reinvestigation of where the data came from, what the context of the data was, et cetera? +
++ >> SHARI TREWIN: I think there may be a rise in methods that bring in the whole context, bring in more on the context, multimodal inputs. Even for speech recognition. It is doing what it does without even really knowing the domain that it is working in. And that is pretty mind blowing, really. + But when it breaks down is when there are technical terms, when you are talking about a domain that is less frequently talked about, less represented. And bringing in that domain knowledge, I think is going to be huge, and, similarly, in terms of hoping to create text alternatives for things, the domain knowledge will help to get a better kind of base suggestion from the AI. + Perhaps with dialogue, we can prompt people with the right questions to help them decide, is this actually a decorative image, or is it important for me to describe what is in this image? That is not always a trivial question to answer, actually. + +
++ >> JUTTA TREVIRANUS: Right. That brings in the issue of classification and labeling, and the need to box or classify specific things. And many of these things are very fuzzy context, and classifiers are also determined hierarchically and maybe there is... +
++ >> SHARI TREWIN: Yes. Maybe we don't need a perfect classifier, but we need a good dialogue where the system knows what questions to ask to help the person decide. +
++ >> JUTTA TREVIRANUS: Right. And, oh, I just saw a message from Carlos saying we are only down to a few more minutes. Can we fit in one more question? +
++ >> SHARI TREWIN: I actually have to stop at the top of the hour. +
++ >> JUTTA TREVIRANUS: Oh, okay. We will have an opportunity to answer the questions that people have submitted in the question and answer dialogue, and we have access to those, so Shari will be able to respond to some of these additional questions that have been asked. Apologies that we went a little over time, Carlos. I will turn it back over to you. +
++ >> CARLOS DUARTE: No. Thank you so much. And thank you, Shari, for the keynote presentation. Thank you Shari and Jutta, I was loving this discussion. It is really unfortunate that we have to stop now. But, thank you so much for your presentations. Thank you, also, to all the panelists yesterday and today for making this a great symposium. Lots of interesting and thought provoking ideas. +
++ And, thank you all for attending. We are at the top of the hour, so we are going to have to close. Let me just, a final ask from me. When you exit this Zoom meeting, you will receive a request for completing a survey, so if you can take a couple of minutes from your time to complete it, it will be important information for us to make these kinds of events better in the future. +
++ Okay. Thank you so much, and see you in the next opportunity. +
+ + + \ No newline at end of file diff --git a/pages/about/projects/wai-coop/symposium3.md b/pages/about/projects/wai-coop/symposium3.md new file mode 100644 index 00000000000..faba11e8448 --- /dev/null +++ b/pages/about/projects/wai-coop/symposium3.md @@ -0,0 +1,1824 @@ +--- +title: "Evaluating Accessibility: Meeting Key Challenges - Online Research Symposium November 2023" +title_html: "Evaluating Accessibility: Meeting Key ChallengesTranscripts: Full transcripts are available in transcripts.
+Date: Updated 13 December 2023.
+Editors: Letícia Seixas Pereira and Carlos Duarte. Contributors: Kevin White and Shawn Lawton Henry.
+Developed as part of the WAI-CooP project, co-funded by the European Commission.
+--- + +![An EU Project]({{ "/content-images/about/eu.svg" | relative_url }}){:.right.small} + +{::nomarkdown} +{% include toc.html type="start" title="Page Contents" class="full" %} +{:/} + +{::options toc_levels="2" /} +- The TOC will replace this text. +{:toc} + + +{::nomarkdown} +{% include toc.html type="end" %} +{:/} + + +## Introduction +{:#introduction} + +Researchers, practitioners, and users with disabilities participated in an international online symposium exploring best practices and challenges in accessibility evaluation and monitoring. + +This online symposium took place on 16 November 2023 and brought together researchers, academics, industry, government, and people with disabilities, to explore practices and challenges involved in monitoring and evaluating digital accessibility. +This symposium aimed to discuss current challenges and opportunities in three main areas, digital accessibility training, mobile accessibility, and Artificial Intelligence. + +Videos from the sessions will soon be available. + +## Session 1: Digital Accessibility Training and Education +- Moderator: Jade Matos Carew (The Open University, UK) +- Sarah Lewthwaite (University of Southampton, UK) +- Audrey Maniez (Access42, FR) + +{% include excol.html type="start" id="session1-transcription" %} +### Transcript of Session 1: Digital Accessibility Training and Education +{:#session1-transcript} + +{% include excol.html type="middle" %} +CARLOS DUARTE: And so now let's move to the first session. This is a session, as I mentioned, on Digital Accessibility Training and Education. It's going to be moderated by Jade from The Open University in UK. And our two panelists will be Sarah from the University of Southampton, also in UK, and Audrey from Access42 in France. So Jade, you can begin our first session. Thank you.
+ + +JADE MATOS CAREW: Thanks, Carlos. Hi, everyone. My name's Jade. I am Head of Digital Accessibility + and Usability at The Open University. It's a real privilege to be here today moderating this session for you. + So I am joined by two wonderful experts in the field of digital accessibility training and education. So we've got + Sarah Lewthwaite, who is a Senior Research Fellow based at the University of Southampton. We've got Audrey Maniez, + who is Director at Access42, and I will let them introduce themselves properly when we get going.
+When you were registered for today, you sent in some kind of questions, and we had a look at them. And some of them + were quite wide and varied. And because we have the experts in the room with us today, we wanted to make sure that + the focus was really exclusively on digital accessibility training and education. So apologies if we don't answer + any specific questions. If you think of any questions along the way, please ask them in the Q&A. And you can also + comment in the chat as well. We are really open to welcoming questions and comments. It's important to know what's + happening out in the wide world, and so we can react to what you are also doing in this field.
+I think that's all I need to say. I am going to hand over, maybe I'll hand over to Sarah. Do you want to introduce + yourself?
++ SARAH LEWTHWAITE: Hello, everybody. Welcome to our session. Thank you for joining us. My name is Sarah Lewthwaite. I am here at the University of Southampton, where I lead a project called Teaching Accessibility in the Digital Skillset, and we've been researching the teaching of accessibility: the content, the approaches, the strategies and tactics that educators use in the workplace and also in academia. And I am based at the Center for Research and Inclusion. And I was also previously a member of the Web Accessibility Task Force for Curricula Development, part of the Education and Outreach Working Group on the Web Accessibility Initiative. I will pass over to Audrey. +
++ AUDREY MANIEZ: Hey, I'm Audrey. I am a digital access specialist at Access42. We are a company + specialized in digital accessibility. We are based in France, so English is not my native language, sorry. And I am + doing accessibility for more than 12 years now. I do audits. I deliver trainings, et cetera. And I also manage the + training center for Access42, where we offer professional trainings based on accessibility. +
+ +JADE MATOS CAREW: Thank you. So we've got kind of a broad agenda within education and training, so + we are going to be looking at things like resources, training needs in the workplace, and how we can embed + accessibility in the curricula. But we thought we might kick off with having a look at some resources, how to get + started with this. It's quite a complex area with lots of different themes throughout it. + So who wants to take that first? Audrey or Sarah, how do we get started? What resources are we looking for when we + are getting started with digital accessibility? +
+SARAH LEWTHWAITE: Well, I suppose the first thing to say would be that the W3C has some really + interesting resources available. And Jade, you might want to talk to some of that. + Also, we've obviously got some great repositories of videos and resources. I know the University of Boulder, + Colorado, has a huge collection that they've been building, and Teach Access have also been collecting resources by + teachers about how they've been using Teach Access funds to then develop accessibility teaching in different + classrooms. + Audrey, would you like to comment? +
+AUDREY MANIEZ: Just to say that the resources you cited are really great, and it's important to identify the authors of resources. That's a really great point. So resources that are created by the W3C are really a good point because we can find a lot of articles on the Web. And some of them, well, a lot, give false information, wrong information, or outdated information. So you have to be really careful when you find something on the Web about accessibility, to really check who wrote that and when it has been written. That's really, really important. Free resources are great, but be careful.
+ +SARAH LEWTHWAITE: I think with that, when we've been doing our research with expert teachers, + particularly in industry but also in academia, there's that question of where do you send your learners when they + want to continue their learning journey? So if you are new to teaching accessibility or if you have established + skills but you are aware that you continue to develop them, do reflect on how you continue to develop your skills, + where you go for good knowledge, because that information about how we learn accessibility is really important to + cascade to your colleagues, to your teams. Because obviously, we are aware this is not a static field. This is an + area where we have to keep developing our own learning, even as we are teaching, even as we are researching.
+JADE MATOS CAREW: It's a really good point. At the Open University, my team looks after a lot of + the training and advocacy for digital accessibility. And when we signpost to external resources, we go through a + vetting process to make sure that it's relevant and meaningful for our staff. And every resource that we post, we + make sure that it's dated. And we routinely go through and check those dates because links break and things happen, + things get outdated. So yeah, it's a real exercise in looking after that as a complete resource.
+I am slowly putting links in the Chat, by the way, for everybody. And we've just had a question in: What criteria + do you have in mind when deciding which resources to use and where to look at? How can an expert or even non expert + decide?
+AUDREY MANIEZ: That's difficult. That's difficult because you have to know a little about the industry to know who, which authors are relevant, which are great authors or great companies or great organizations. You have to know a little bit about them. The community can help to identify this. We have a brilliant mailing list where you can post questions, ask questions, and the accessibility community will answer you. So I don't have, really, criteria, but it's important to know who is who in the accessibility field, I think.
+JADE MATOS CAREW: I think the WAI resources as well, they are certainly, because of the way in + which they are designed and made, you know, by a panel of people that come together and work really hard to edit + small words and details in all of those resources, so you know that you can trust them. They've been through a + really rigorous editing process. So my personal, whenever I have to direct someone to a resource, that's always top + of my list. And there's a whole range of resources on that for lots of different needs. Lots of short resources for + simple advocacy as well. + Sarah, do you have any comments on that last question? +
+SARAH LEWTHWAITE: No, except to say, obviously, this is an ongoing issue in a lot of fields, you + know, the quality of online resources is a huge issue for anyone teaching in higher education. It's a question of + how you assess and critique and critically engage with online resources. But as Audrey and Jade have mentioned, + there are awesome, very solid, very well developed resources, good places to start from in terms of the field. + But I do realize this is a particular challenge in accessibility because it is fast moving, and also because so much + education takes place outside of formal learning environments. So you know, you will be learning on the job, + learning by doing. There will be informal training, training organized by different organizations, conferences. + There are a lot of places to learn. And traditionally, those have been where the majority of learning takes place. + So it is a recognized challenge, but it is worth investing time and thought into. +
+ +JADE MATOS CAREW: Well, Sarah, one of your recent research papers, which I'll post the link to in + the Chat, was looking at workplace approaches to digital accessibility education. And you raised the topic of having + a foundational knowledge of accessibility or a baseline knowledge. I was wondering if you could talk to us about + what that includes and who decides what it includes.
+ + +SARAH LEWTHWAITE: That's a big question. Excuse me. So yes, so as I say, with my project, we've + been trying to get close to practice but to look across a variety of different locations, from workplace to higher + education, to understand what characterizes accessibility as a field, as an educational field. So with that, I know + when we looked at some of the questions submitted to this session, people wanted those kinds of tricks and tools and + tips, and that's why we've kind of started in this resource place. But some of the questions that you will have to + ask yourself as an educator are quite fundamental in the sense that different contexts will make different demands, + and different learners will require different resources. And there's a different kind of cohort of central knowledge + that you need to establish. + And we wrote about this foundational approach because we realized that, particularly in the workplace, a lot of + effort is put into bringing people onto the same page. So we recognize that accessibility is a shared endeavor. It's + not located within one role. It shouldn't be about one accessibility expert serving an entire organization. A lot of + people need to know what accessibility is so they can collaborate and recognize it as a shared responsibility, a + professional responsibility.
+So there are lots of dimensions to that, and when you are coming to this as a trainer or somebody trying to build capacity in your organization, there are a lot of facets that come into play. For example, understanding what prior learning you have, where your learners are coming from, what perspectives they bring. Where there might be misconceptions can be also vitally important to helping people on that journey. And you'll be needing to do work about defining what the core knowledge is for your organization, what your colleagues really need to know, what are the essential points. And within that, there can be quite complex different worlds of knowledge that you have to bring together. So for example, we are here talking about Web standards, Web accessibility standards, but there's also a piece about disability awareness, which can be more conceptual. How people understand "normal", I am doing inverted commas, what their average user is, and try and break that down and unlearn some of the assumptions people bring into organizations, sometimes from their educational pathway to date. So there's this kind of conceptual piece about disability, and there's the technical piece. But then between, there's also a lot of knowledge that people need to gain around how, how to do accessibility in the field, which can be to do with decision making, process, and often collaboration between and across a workflow. + So that, then, introduces issues about whether you are bringing different roles together to learn about accessibility and those fundamentals and how and when you should specialize. I have talked quite a lot there, so... I'll hand over to Audrey because I'd love to know from her side what the kind of view is on that.
+AUDREY MANIEZ: Okay. Thank you, Sarah. Great talk. + So yeah, the core knowledge for everybody in an organization to share on accessibility, as you said, is awareness about disability, a deconstruction, for people, of what disabled people can do. I think there's also knowledge about the political aspects of accessibility, which is really, really why we are doing that. + Then for the more technical side, maybe, it's also important to know the users' needs. That's really a key point in accessibility for all jobs, maybe for a designer or developer, whatever. Understand why we are doing things, to resolve what kind of problem, what kind of issue. That's really the key point for everybody. Understand how users navigate on the Web, how they do or do not access information. I think that's the basis of knowledge everybody should share. +
+ + +JADE MATOS CAREW: Does that also include compliance and legislation? This is one of the questions + that we had in from a participant. So what role does that play in foundational training?
+ +AUDREY MANIEZ: Yeah, that can be complex, legislation. So a bit knowing that it's required in some + countries, but yes, knowing of it, it can be really complex. For example, in France, it begins to be really complex, + to follow all the news about the legislation. So yeah, it's important. It's important.
+ +SARAH LEWTHWAITE: I think in teaching, sometimes standards have quite a conflicted role. So some + of our experts talked about how + sometimes they won't use standards. They'll talk more to why this work is important and focus on the user and use + that as the kind of motivating principle for learners. Others talked about compliance in terms of finding ways to + introduce learners to standards without kind of dropping them in at the deep end, to use a metaphor, which means, + you know, using sometimes resources which translate standards into more accessible format for people who are new to + the field. Or maybe starting in a small place, taking parts of WCAG and exploring what they mean to give people that + entry route where they feel they can try things out, that they are applying their learning, and that they can then + move on to look at the broader standards picture themselves, feeling they've already entered and tried and + practiced.
++ But there is also an important conceptual dynamic, which is I think standards are so important to Web accessibility, + but how we present them is also important. So often our experts talk about presenting them as a floor, not a + ceiling, + in the sense that here's what we are going to try and do, and then you want to go and try and get beyond that. Not + that this is what you are aiming for and then you are done. So always encourage developers, designers, content + authors + to use these structures of Web standards, but also to scrutinize what they are doing. So you are not just learning + and + designing to the standard. You are always critiquing your own practice, examining what you are doing, why you are + doing it, how you are doing it, to keep that balance between the kind of the structure versus the creative piece + because creativity is so important in our field. And it's recognizing that Web standards can be part and enable + that; + that they don't close down creativity. Because we know creative learning is so important in terms of getting people + motivated and enjoying their work. +
+JADE MATOS CAREW: In my experience, different types of learners react to standards and guidelines + in different ways. So for some people, especially if they don't have, maybe, a technical role, they can switch off + if you present them with an overwhelming set of technical standards. So in my context, we have a lot of focus on + practical demonstrations and examples rather than going straight to the guidelines. + Do you think that following guidelines and standards helps people keep up with a changing landscape of digital + accessibility? So this is another question which has come in. How can we keep up with evolving ways of accessibility + and how it changes quite quickly sometimes? +
+SARAH LEWTHWAITE: I am going to hand to Audrey. How do you do this in practice?
+ +JADE MATOS CAREW: Okay
+AUDREY MANIEZ: How can we evolve, even if we follow the standards, that's the question, that's it. As you say, the standards are not obstacles. They are just things we have to do to allow people to access the Web. And that's where it's important to know the user needs. I come back to that because if you know which goal we are trying to reach, we can imagine lots of solutions. That's why we have to know what our users need, how they navigate, because that allows people to create new solutions and still be within the success criteria, and just yeah, that's because it's really important. I mean, it's really important to begin the reflection, the thinking, with the user. You begin with the user first, and then you can create a solution, I think.
+SARAH LEWTHWAITE: I think that's so important because, you know, the accessibility standards are abstracted knowledge about what disabled people do online, how they use the Web. I think it's great that we've got so many resources from the W3C that show where these are coming from and why they exist in terms of helping close that gap with the standards. But yes, if you want to stay ahead of the game, it's always working with the people whose knowledge is the foundation for accessibility Web standards. So it's talking to your users, all your users, recognizing the breadth of your users. And it's also hiring for your teams and making sure that your teams reflect the world as it is, which means including disability, including disabled people, and recognizing, excuse me, recognizing what we have to bring ourselves to that conversation also.
+JADE MATOS CAREW: This links to another question that we've had in. Thank you for all of these questions. Please do keep them coming. And it's about AI. And the question kind of says this might be a bit more relevant for later, but this is really forward thinking stuff. How are we dealing with all of these kinds of future evolutions, things like AI coming into different areas of accessibility? + And there's even a question there about whether the accessibility requirements will sort of become redundant with AI doing most of the work in building websites. Maybe there won't be a need for training in the future. What do you think? Audrey, what are you seeing in the field? +
+AUDREY MANIEZ: Oh, that's a really complex question. I don't think AI will solve every problem of accessibility. Most accessibility issues are based on understanding context. Even today, we have automated testing that tests a really, really little piece of our requirements in accessibility. Well, I am not sure AI will help more to detect or fix issues. It can help in other fields of accessibility, but to fix issues, that I am not sure. Well, really, I am not a specialist in AI, really.
+ + +SARAH LEWTHWAITE: Yeah, I think I am sure this will come up for discussion later, and I think + there will be some really interesting answers in that session. But I think the concern I have sort of coming from a + background in disability research and disability studies, critical disability studies, is that data, be it social + statistics, be it those statistical views of populations driven by data tend to be highly normative. Where data is + normative and these ideas of average arise, often people who are positioned on the edge of that are then missing and + often further marginalized. So I have major concerns over AI in terms of what it deems "normal," be that websites do + we think the majority of websites are accessible? What are these tools able to do in view of, as Audrey says, the + changing and the contextual nature of accessibility?
+I think there are some really interesting discussions happening, and there are some good people looking at how you + do work data so it is more inclusive. So Jutta Treviranus talks about the bell curve and the need to cut, take a + lawnmower to the bell curve so that you are always including and weighting data to take care of everybody, + basically. But that may be a slightly different subject to this automation of testing dynamic. But I just think so + often people are looking for ways to cut real people out of the system and the process, and I think it's really + important to recognize the value of authentic experience of our products from our users. +
+ +JADE MATOS CAREW: Are you seeing links between accessibility and AI or XR, AR, VR, and is that + being brought into + training and education for accessibility? New, evolving areas, are they being brought into the curricula, do you + think?
+ +SARAH LEWTHWAITE: I think I don't want to sound downbeat, but I think at the moment, there are + some tussles + happening in the computer science curriculum, which sometimes mean AI is coming in and pushing out other areas. So + some of our educators that we've interviewed talked about the need to kind of harness new fields and make sure our + accessibility is part of that from the get-go. So yeah, we are seeing AI and accessibility courses starting. We are + seeing people putting AI at the heart of XR and VR and also robotics. And there's some really exciting things. + Whether those are coming from the mainstream of those disciplines or whether they are kind of accessibility people + kind of busting in to make things happen I think is less clear. So I can't speak to that overarching picture. But + it's really important to keep accessibility in these innovative spaces because standards and so on tend to come a + step behind just by virtue of how they are made and created.
+ +JADE MATOS CAREW: How can we keep up with that? There's a question in the Chat: How can we cope + with the fact that advice from yesterday may no longer be relevant today because of evolution in technology?
+ +SARAH LEWTHWAITE: I would say, as we said before, it's that perennial problem. It's an ongoing + issue. And where you can, it's maintaining that user research, that accessibility research with real people that's + going to help you bridge that gap. So keep that value in your developmental practice, in your learning practice, and + then look at how you cascade that knowledge through your organizations. Because there is an organizational change + piece here, I think, that we've not talked about yet. And it's a tension for me. My research is very much about what + teachers do, what educators do in that kind of local space of the classroom. But there are also the sociocultural + dynamics that push and pull on what's possible in education, in the industry. And there is that need to think about + the organizational piece. And I know conversations about accessibility maturity and some of these overarching issues + are really important, too.
+ +JADE MATOS CAREW: Well, let's think about that change management piece. It's so relevant to + accessibility and how we handle it in the workplace. Audrey, I think you have a lot of experience in terms of + training in workplace situations. So how in your experience, how is accessibility incorporated into a professional + development context?
+AUDREY MANIEZ: So yeah, we do a lot of training. We train a lot of people that are working in organizations. We train a lot of developers or auditors. And it's clear that, as you say, you talked about managing and so on. The organizations that train their employees already have a political strategy for accessibility. That is the first thing that is needed in an organization, private or public. If the director, if the company, has no accessibility policy, then there's no training for the employees. So that's really a global political subject in companies and in public organizations, so that people can access training. So that's it. So yes, we need a clear strategy in organizations so people can be trained in accessibility. It's not an individual initiative that leads to training. That's really important. Oh, sorry. So in the workplace, that's what I can say about it.
+ +JADE MATOS CAREW: Well, if we can pick that training apart a little bit. So something that + interests me in particular is moving away from providing just guidance and just one off or ad hoc trainings to + people that perhaps people go through in a very passive way, they don't really engage with the materials. So in your + experience, for both of you, and Sarah as well, you are interested in the pedagogy behind how we can actually make + people do this in reality. So what does the training look like? How can we make it effective and meaningful?
+AUDREY MANIEZ: Accessibility jobs really require experience. Even if you have followed a training for two or five days, like we have, for example, at Access42 for designers or developers, after that, it's really important to have time to really train on real projects. For example, at Access42, we do it for our own needs. We train a lot of the auditors that we have. From the time they have been taught, after a training, it can take four to six months for people to be really independent in their work. Really. So, as you say, you have the knowledge, and then you have to practice to be really effective, to be good at what you do. And you will be better if you are incorporated in our community.
+ +JADE MATOS CAREW: Mm hmm.
+ +AUDREY MANIEZ: That's really important, to have others to share with, to ask questions, to share + practices. That's really in an organization, it's really important, yeah, community.
+ +JADE MATOS CAREW: What do those communities look like? So for example, at the Open University, we + have social media, internal social media channels. We provide drop ins and lots of opportunities for staff to + network. Different things like that. What kinds of things do you experience?
+AUDREY MANIEZ: In our company, for example, it's truly a place to share every day. We do audits every day, so we share every day about what we found in accessibility. We have a chat where we speak to each other, to ask questions, to find help, to fix some issues, et cetera. And what is great is that the chat is like a knowledge base. If we recruit a new person, they can read all we discussed for a year, two years, and that's our knowledge base. That's truly our documentation, our own documentation. That's really, really interesting. And it's the same with the mailing list, if I can talk about WebAIM; those are really two rich resources that you can search in. That's really, really great documentation too. So yeah, community sharing. That's what we do. And once a month, we all have a video meeting together to share about problems, to harmonize our way to work, our way to tell things, to present things to developers, et cetera, et cetera. That's what we do.
+ +SARAH LEWTHWAITE: And if I can add, when we've spoken to, basically, government experts about how + they build those large scale communities, so if you do have these Q&A spaces, questioning spaces for people to trade + information and, you know, knowledge that's really specific to your organization, we've seen strategies by the + managers of those lists where the experts will purposefully hold back slightly when a question is raised so that + there's an opportunity for people in the community to start to express their expertise and practice and bring to the + table, maybe for the first time, their knowledge.
++ And then you've still got that kind of safety net of the experts + on the list ready to step in if there's any accidents or if anything is slightly incorrect. So if you are building + these sort of online spaces where you are sharing information, think about ways to help bring the new people on + board and let them step into their expertise and express what they know to help build that expert capacity more + broadly. So it's not always falling down to the champions of accessibility to be the one person who knows + everything. Because we know that model is precarious. If that person leaves, you lose the expertise. So, so much of + this is about broadening the knowledge base. And I know many people talk about the importance of everybody knowing a + little bit about accessibility. It's from this point then we can sort of build and build up the expertise. +
+ +JADE MATOS CAREW: We have a really good system at the OU where if we get asked a question, the + first stage is to + direct them to our internal social media, to ask there. And also, Audrey, as you were saying, search through what's + happened before and whether it's been asked in the past. That's a really, really useful tool to have. But also it + encourages other people who aren't accessibility champions to jump in and answer and share their expertise. And then + if we still can't have the question answered in that space, that's when members of our team will come in and try and + give an answer from an expert perspective. Thank you. Sarah, I want to ask you about what skills so we've spoken a + lot about informal ways of sharing knowledge, but what about the formal ways? So what kind of skills do people need + to teach accessibility effectively?
+SARAH LEWTHWAITE: The skills to teach accessibility?
+JADE MATOS CAREW: Mm hmm.
++ SARAH LEWTHWAITE: Well, so I think one of the reasons I started my project was because I was aware + that + sometimes, particularly in academia, where maybe there's more teaching knowledge, more teaching experience, there + isn't necessarily the accessibility expertise that you see in industry. And likewise, in industry, I think a lot of + teaching knowledge is quite hard won by doing the teaching and gaining the knowledge that way. + So I was interested in how the pedagogic knowledge and the content knowledge, the knowledge about accessibility, are + fused together. So what the teaching of accessibility requires specifically. And how to kind of build that knowledge + up through research and cross case research. So I would if you are on this call, there's a lot of open access + research about the teaching of accessibility, which I think often isn't where we first go when we are designing + teaching; right? There are shared curricula. There are research papers which you can draw on. We wanted to do cross + case research so we could look at a variety of contexts and what's important in those contexts. And of course, it + does vary depending on who your learners are and what you are trying to do. + So some of the questions that I would put to people on the call is about establishing what your learners need to + know about accessibility, what is essential, what are your learning objectives? Try to set those and be clear with + yourself so that you can then kind of put those into action. And it's difficult because I also recognize there's a + lot of expertise in this room that we can't see. So you know, it's recognizing that. +
++ Alongside these accessibility communities we've talked about, I think there's a real need for teaching accessibility + communities, places for teachers and trainers to share what they do and start reflecting on what they do, naming it. + So don't be afraid of pedagogic language and start to think about, you know, reflexive practitioner, thinking about + learning by doing rather than learning through trial and error. You know, how do you when you are getting your teams + to do projects, as Audrey described, when people are practicing in the field or in simulated situations, if you are + teaching a graduate program and you are running project based learning with your learners, there are a range of + things that you can put in place around that team to help them, to support them with resources, to check in around + what skills that team might need. +
++ But I suppose I am talking around a range of issues. But I think I want to come back to that key point around + disability awareness by understanding the users, understanding disability, thinking again about ourselves, really. + That awareness piece being so fundamental. And then with that, this process piece about the doing of accessibility, + how are you going to give your learners opportunities to put knowledge into practice? And then also the technical + piece, that there will be a certain range of techniques coding, et cetera that is also part of that kind of learning + by doing. So it's bringing together those three, but recognizing that they are quite different worlds of knowledge + that you are having to bring into like synthesize together. So you will have learners who are much happier coding, + and you will have other learners who are much happier getting into the usability piece, trying to understand what + people want and need and thinking about complex issues. Overall, accessibility does deal with uncertain knowledge. + You know, we have to work hard to know what to do in any given situation. There aren't always straight answers. And + Web standards take us so far, but they can't answer all the questions we have about what our users need. +
++ Now, for some learners, that's deeply uncomfortable. They want to know what to do in any given situation. So I think + and it's a real expert competency, dealing with uncertainty is one of those markers of expert knowledge in a vast + majority of fields. But for us in accessibility, it's kind of like dead center. So I think often our experts, you + know, and do read the papers that have been shared in the Chat, and love to hear your thoughts on that as well + because obviously, this is a huge field. I am not saying we've answered anywhere near all the questions. We are just + getting started looking at this piece. But recognizing that that uncertain knowledge, you know, working between + compliance versus the realities of complex everyday experience, is a challenging space. And it has a range of expert + competencies that you need to grow. And for some, it will be uncomfortable. So part of it is often bringing that to + examples that are as clear as possible. +
++ So often when we've spoken to people in organizations like Audrey's, if you are going into an organization, you want + to show them their own websites and how they might work or how they might not work. When you are talking about + disabled people, you might want to be naming members of your team and saying, you know, is this going to work for + Jeff? Is this going to work for me? You know, like always trying to bring it back to something concrete, something + real, so it's no longer abstract and somewhere else. Because the reality is much closer. It's in our everybody's + world. +
+ ++ JADE MATOS CAREW: We've had success with that at the OU, when we developed our curricula for + developer training. + So using the WAI curricula modules. And using them as the foundation to add kind of really relevant contextual case + studies, examples, demos. So we've had success with that and making it really relevant to our audience. + And another thing we had success with was accountability partnering, so pairing up people from around the OU from + different staff groups and having a really practical example of using guidance and training to making real fixes in + their own documents or designs. So that's a really useful thing that we've come across. + Where was I going next? There's been a question in the chat, it's a massive one: How can we integrate accessibility + into the university curriculum? So taking into account the various roles within the accessibility field and their + associated processes. So does anybody want to take that? Sarah, I think that's probably another one for you. +
++ SARAH LEWTHWAITE: All I can say is it's going to take a big effort. Because I think, I mean, I've + drawn a + distinction between academia and the workplace, but I recognize that the university is a workplace. And as a + workplace, it's quite a complicated workplace. So I think it has to be an effort at a number of different levels + that run across a range of different groups. I mean, really, I should throw it back to you, Jade, because I know the + Open University is world leading in so much of this. But I think there's a lot of work that's been done around + accessibility maturity for higher education. There's really great conferences and networks, so I am thinking of + HighEdWeb and their Accessibility Summit, which I think is annual. Obviously, there are lots of I think you've + posted the mailing list on assistive technologies, which serves learning developer communities particularly. And + there are, obviously, disability support dynamics as well. +
++ I think the challenge for higher education at the moment is that accessibility is largely thought of as being for + students. And it doesn't recognize our disabled staff at the level it should. And it doesn't recognize, in that + respect, the kind of platforms that staff have to use, that researchers have to use, and that it's about more than + just serving our disabled students. It's about the entire university estate. So for me, it's an accessibility + maturity question, and I know there's really great people working in this space. I know AbilityNet have done a lot + of really good stuff about accessibility maturity and higher education. So if you are looking at that piece, that's + where I would direct you to go. But I think it's always a work in progress. But I also think the in Europe in + particular, you know, the new regs on mobile accessibility and the Web mean that our universities are being audited + now for the first time on their public facing accessibility. And that's a really teachable moment for the sector in + terms of universities trying to turn their ships around. I think we deal with a lot of legacy systems in particular, + which are troublesome. But in my experience, in the UK, certainly, it's becoming more positive in that beyond just + serving our students and recognizing our duties and commitments to them, there's a growing understanding of the + responsibility that we have to serving wider publics. And I think there's more mobilizing of political dimensions + amongst staff to fully recognize the breadth and diversity of our staff groups. +
++ As I say that, I know disability can be at the bottom of the list within even equality, diversity, and inclusion + agendas. But I do want to be hopeful about us trying to make changes where we can and using these opportunities to + put disability and accessibility front and center. + Over to you, Jade. Now tell us what you are doing at the OU. +
+JADE MATOS CAREW: I was going to throw it to Audrey, actually, and ask about barriers to embedding accessibility into your workplace curriculums and how you deal with staff training. So barriers to embedding accessibility into your training and your curriculum.
+AUDREY MANIEZ: In training, you mean in university or in general?
+JADE MATOS CAREW: In general workplace. So in your practical experience.
++ AUDREY MANIEZ: Okay. The first barrier is always the same, it's the political barrier. Like, if the management wants to train people, so people will be trained, we can be faced with the problem of the accessibility of the material itself. So the tools that we are using to teach, that's a problem when you have disabled students to train, and the content we deliver to people; that content, those are the main barriers to training on accessibility. That's mainly that. + And I like what Sarah said: in training, students want an answer to each problem. That's a barrier in training too. Because they want a clear answer to each problem they will be faced with in the real world, and we can't give them that. But teaching barriers, that's all I can say, the tools are really a big problem. Because the learning tools really are not accessible at all. They don't allow us to give accessible content to our students, and that's a big problem.
+JADE MATOS CAREW: So in what ways are the tools so you mentioned the technical requirements of + tools. So what + kind of barriers do you see there?
++ AUDREY MANIEZ: The technical requirements? Yeah, they have to be WCAG compliant, and they are not WCAG compliant. Tools like LMSs are really not taking accessibility into their roadmap. Really few tools do that. So that's it. + I made a little study last year on CS tools, and of the over 30 LMS-type tools we found, just one listed an accessibility report. That's really few tools. So those tools can't give accessible interfaces to students, and that's a big problem, most of all in universities. +
++ JADE MATOS CAREW: Okay. Thank you. I am just trying to keep up with the chat here and check to see + if there are + any questions. Sorry. Just bear with me for a moment. + Sarah, are we are you Sarah, sorry, did you mention that you are leaving or are you staying before I direct any + questions? +
+SARAH LEWTHWAITE: I am afraid I am aware this session was running until 2:00 sorry, 2:00 local + time. I + appreciate it's different in Europe. So I only have a couple more minutes.
+JADE MATOS CAREW: Okay. I am just wondering, there was one other question here for a university + context. In a + university, you likely have the opportunity to integrate accessibility into other disciplines so engineering, + political science, lots of different things. Do we have any examples of how that's happened, where that's happened, + any success stories?
++ SARAH LEWTHWAITE: I mean, I think it is happening. I am not sure there's visibility. And I think + that's one of + the challenges as a field is regarding, for example, just the level, knowledge of where and how accessibility is + being taught. So I am aware, Kristen Shinohara at RIT did that survey of colleagues looking at the teaching of + accessibility across the USA, and you know, whether it's appearing in software engineering, and other fields. I + think that there is just a piece to be done about where accessibility is because at the moment, you only really see + the specialist departments publicizing online where it's being taught. So you know, you will see that at some in the + UK, say at Dundee, at the Open University, at other leading institutes, but it's difficult to know where and how it + is being taught. +
+Of course, it is being embedded, so I would say look at what research is coming out about this, and I think there + is + a lot of work about teaching accessibly, you know, which I know the University of Washington have done a lot on in a + range of fields. So that is building up. But it's a difficult picture to assess, and I think if you are somebody + watching this and you have conduit to some of the professional organizations, there is that question of that raising + knowledge of where it's being done, how it's being done, and how it's being done well, but I am sorry I don't have + those answers. But I think in the next phase of my research, I am very interested in trying to look into that more + fully. I am going to have to step away, so thank you very much, everybody. +
+JADE MATOS CAREW: Thank you, Sarah.
++ AUDREY MANIEZ: In France, we have a degree at the University of La Reunion that is focused only on accessibility, to train accessibility managers; they are trained to create an accessibility culture inside organizations, to manage accessibility requirements, audits, trainings, et cetera. So that's a real degree. That's the first year. That's a really, really great project. Yes, we are really proud. And we can see some units of teaching about accessibility in some degrees at university. Really few, but you can find some accessibility words sometimes in the degree programs. So that's a little shy, but yes, maybe that will come a little more in the future. + And I think that's linked with the job market needs. Since jobs on the job market do not require accessibility skills, universities won't train people on accessibility. I mean, we need to have a real need in the job market for these skills. Organizations have to put accessibility in job requirements so it can be a real skill to have, and I think it can be a circle from that. +
++ JADE MATOS CAREW: That's one of the ways that we are looking at this at the Open University, so making sure that accessibility is visible in everything that we do. So if we are talking about accessibility, if we are hosting a presentation, that presentation needs to be accessible in itself. And I think this is really important for students to see as well, that accessibility is being prioritized and that it's visible in learning spaces. And I suppose that means that it's a more holistic and informal approach to advocating for accessibility and raising awareness and building those skills in an informal way. + Shall we have a look at the chat and see if there's anything we haven't answered before we move away from this? Just seeing if there's anything I have missed. Thank you for all of your questions. There have been some really good ones. + There was a question which says could you recommend any courses that teach the accessibility requirements outlined in EN 301 549 in plain language? + I suppose we'll direct you right to the beginning of the chat, where we posted links to some of our favorite resources. In particular, the WAI resources from the W3C. And among those, there's an Accessibility Fundamentals course. +
+AUDREY MANIEZ: Yeah, maybe not about the EN, but yeah.
++ JADE MATOS CAREW: Maybe not in particular, but I suppose it's the reason it's beneficial, + obviously, is because + it's referencing the most up to date materials. + Do you have anything that you'd like to recommend, Audrey? +
+AUDREY MANIEZ: About the EN, no, I don't have. Just the document itself. But no, I have nothing + else to + recommend that we have.
++ JADE MATOS CAREW: Are there any other final questions, perhaps, that haven't been asked or anything + that I have + missed that's relevant to our conversation about education and training? + Anything else? + What is your position this is a good one that's just come in. What is your position on certifications such as the + IAAP, which is the International Association of Accessibility Practitioners? Audrey, that's a good one for you + because you'll have a lot of familiarity with this. +
++ AUDREY MANIEZ: With the certification? With IAAP, I don't have IAAP. My position is that it's not + required to be + a good professional. We have really good professionals that don't have a certification. But for some people, that + gives them a structure to a point, a goal. That can be great to have the certification. I don't know the content of + the certification, so I can't tell if it's a good one or not. But the concept, the thing is something good because + you have a certificate, you have proved you can do things, and that's great. + We, too, do some certification at Access42. We do training, so people have to do some things, we evaluate them, and + we give or not give certification. And that's great for some people to find a job because they can prove to their + employer they are capable of doing what is written on the certificate. +
+JADE MATOS CAREW: I agree. Actually, I think that it demonstrates a commitment to accessibility, a + professional + commitment to it. And from my experience with IAAP, the content of that exam is quite broad and wide ranging. And it + really enables somebody to focus on upskilling their knowledge in that area. So I think they are, on the whole, + positive.
+AUDREY MANIEZ: Okay.
++ JADE MATOS CAREW: I think they are still quite new, though, so we've yet to see the impact fully of + these + certifications. + I've just noticed that Sarah has dropped back in, into the meeting. Do you have anything to add on certifications in + your experience? +
+SARAH LEWTHWAITE: I am sorry, I may have a slightly noisy background, but I think the + certification's really + important, as the reasons you've raised. I think the only challenge sometimes is and stop me, Jade, if it is too + noisy in the background...
+JADE MATOS CAREW: It's okay.
+SARAH LEWTHWAITE: ... is the cultural dimension, that different territories have slightly different requirements. And sometimes sensitizing those kinds of certifications for, say, the UK or the U.S. or India, it's really important. And I think that's something the IAAP are doing, and that's really great.
+JADE MATOS CAREW: Agree. + Right. We'll close there and hand back over to Carlos. Thank you so much, Audrey and Sarah, for your time today and + for your answers. I've got a couple more links that I'd like to post in the chat just to a couple more places that + we compiled before we met today. And thank you for those who have posted links in the chat also and for your + questions. So handing over, Carlos. + +
++ CARLOS DUARTE: Thank you so much, Jade, and thank you also Sarah and Audrey. It was a really, + really great discussion. It's great to see that there's also a lot of positive feedback coming into the chat. + And we'll now have a ten minute break, so we will be back at a quarter past top of the hour. Jade is busy still + pushing more links into the chat. Great. If any of the panelists also would like to answer some of the questions + that you haven't been able to tackle live, they are still in the Q&A, so please do. And yeah, in 10 minutes, we'll + be back for our second session on mobile accessibility, so see you in a while. +
+{% include excol.html type="end" %} + +## Session 2: Mobile Accessibility +- Moderator: Detlev Fischer (DIAS GmbH, DE) +- André Pimenta Freire (Universidade Federal de Lavras, BR) +- Paul van Workum (Digitoegankelijk / Abra / Appt foundation, NL) + +{% include excol.html type="start" id="session2-transcription" %} +### Transcript of Session 2: Mobile Accessibility +{:#session2-transcript} + +{% include excol.html type="middle" %} ++ CARLOS DUARTE: Okay. I think we are ready to begin our second session. I hope everyone enjoyed the + break. Just + another reminder. You can use the Q&A to pose any questions to the session participants, and we'll also be + monitoring the chat, but we are mostly using that for any technical issues or for the session participants to share + any links to resources that are relevant. + And so for the second session, the topic will be Mobile Accessibility. Detlev from Germany, from DIAS, is going to + moderate it. And our two panelists will be André from Universidade Federal de Lavras in Brazil, and Paul from + Digital Accessibility, in English, the English translation, in the Netherlands, will be joining us for the next + hour. You can take it away, Detlev. +
+DETLEV FISCHER: Hello, and welcome. I am trying to emulate what Jade said because it's always + difficult for me + to find the right format for introducing everyone. And we had I was prepared to introduce the panelists, but I think + it's probably better if they introduce themselves. Carlos has already given the names, so before we start with our + topic, I would just like you, both of you, to just spend a minute to just say who you are and what you are doing, + and I'll add to that, and then we can start. Do you want to start, Paul?
++ PAUL VAN WORKUM: Yes, that's fine. I am Paul van Workum, and I have been working for a few years now in the field of app accessibility. I am one of the founders of the Appt Foundation, and we created a knowledge base platform with a lot of information about app-specific things, like how assistive technologies work and how you can fix certain issues in certain code bases. So that's an interesting resource, I think. Besides that, we have a company where we do everything around app accessibility, from training to testing to user testing, and also help organizations evolve their maturity level. Besides that, I work at the Dutch government, helping there with the most critical apps and doing some supplier management, basically, to help suppliers of apps to governments become accessible. That's it. Andre?
+DETLEV FISCHER: Do you want to pick up, Andre, and say a few words about yourself?
++ ANDRE PIMENTA FREIRE: Yeah, sure. Hello, everyone. First of all, thanks to Letícia, Carlos and all the organizers, and thanks to Detlev and Paul for sharing the session. I think we'll have a very good time here sharing some lessons learned and challenges of evaluating the accessibility of mobile apps. I am an assistant professor at the Federal University of Lavras, officially in the field of human computer interaction, and I also teach optional courses on accessibility. We've done some research, among other things, on mobile accessibility, including evaluation. So I hope we can share a couple of lessons we've learned looking at different issues in mobile accessibility evaluation: from technical work we've done jointly with colleagues on automated evaluation, on how to do manual auditing of mobile accessibility, on work across different platforms, and even some more recent studies we've done at the policy level. We may have a couple of particular issues in Brazil to share, which might be applicable to many other developing countries, on having not such bad regulation and legislation on accessibility in general, but accessibility legislation covering digital accessibility that is now, I think, eight years old and just left mobile accessibility out. And we have looked into how monitoring and law enforcement have worked in that scenario. We have some recent advancements in Brazil. Reinaldo Ferraz is here; he has done a lot of work with the Brazilian national regulatory body to put out a new set of guidelines specifically for mobile accessibility at the end of 2022. So I hope we can share a couple of lessons, both from the technical side, going through to processes in universities, research agencies and companies, and what we've seen in policies in different countries, both with advanced legislation, such as in the European Union, and other countries that are kind of catching up with that. So looking forward to very nice discussions here.
++ DETLEV FISCHER: Okay. Thank you both. I will just say a few words about me so you know who I am. I am the managing director of DIAS, which is a company specialized in accessibility testing and consulting. And I have been a member of the Accessibility Guidelines Working Group of W3C for, I think, about 12 years now. So I am actively involved in shaping the new WCAG 3 standard, which is kind of a difficult or challenging thing. I've also been involved in policy consulting, in the sense that I have been a member of WADEX, the Web Accessibility Directive Expert Group, that helped the European Commission to devise a monitoring scheme so that the Web Accessibility Directive can be monitored across the Member States. So that was interesting as well.
++ Yeah, that's my personal background. And I thought I'd start with a very, very quick run on, to give some context of + what we are discussing today. And that starts with the name of the session itself. It's called "Mobile + Accessibility." + That has been a bit under attack within the Working Group, the Web Accessibility Guidelines Working Group. Because + it's increasingly a misnomer. There was a time when it was perceived as separate; there were the mobile apps on the + smartphones, and there was the world of the desktop. But we increasingly see that apps are also run on a tablet, + which + has a keyboard, so there's keyboard accessibility issues there. And we also see that increasingly desktop + environments + have touch, for example, something which was not at least not common ten years ago or not available ten years ago. + So + those two worlds seem to be slowly growing together, and we cannot make assumptions anymore so clearly as we used to + do in the past. And I think the result is that the Working Group has moved away from calling out mobile + accessibility + to something which is more looking at different input modalities. So you basically have two different input + modalities. One is coordinate based, if you like, so that's your pointer. Whether you guide that with a mouse or + with + your finger on the touchscreen or whether you guide it with a grid when you are a speech input user, you can guide a + pointer, a virtual pointer. And the other thing is the traditional keyboard method or input modality, where you have + other assistive technologies which are based on that. For example, a switch user has some motor disabilities and + uses + a switch to interact with a webpage, for example, or an app, they would need a good keyboard accessibility to + operate + things. So that's the way things are going. So mobile accessibility as a term, I think, is probably on the way out. + But that doesn't mean that it's not interesting to discuss the particular issues we have. +
++ So one of the things we are faced with as evaluators is there are no specific or at least in the normative space, in + the space of European norms and standards and directives, there are no specific mobile guidelines. They are + basically + all derived from Web guidelines. They have just taken out six of the Web accessibility requirements and basically + put + the rest into Chapter 11 of the European norm, which is called Software, and we are now supposed to use those to + evaluate apps because they count as software. And obviously, there are some problems with that. And that's what we + are + probably talking about later on in more detail, at what points you see there are differences where the standard + requirement cannot easily be applied to apps, and what can we do about that? +
++ So there also have been some requirements which have been around in other recommendations for mobile accessibility + for + some time, which have only very recently become standard requirements. For example, WCAG 2.2 now has something + called + "target size" which gives us some idea of how big a target should be, and that has never been a standard requirement + before. But several recommendations for mobile apps and accessibility of apps have included that. And also the + framework of the big two operating system providers, Apple and Android, have their own guidelines which have things + like recommended touch target size, for example. +
++ So it's an interesting field. The thing is because of the difficulty of applying requirements which have been + written + for Web onto the mobile space, we also have the problem that in quite a few places, it's very difficult to apply + them + correctly. And you have a fairly high margin of error or of uncertainty where you think, you know, is this a + reliable + assessment? Especially in a context of conformance assessments, or if we are looking at it from a for example, from + a + monitoring perspective, where some public body needs to say this is accessible, you know, you have met all the + requirements, you know, is that easy to do? And how do we do that? +
++ So the big difference, of course, is apps normally come in a windowless environment. Usually they are in a + windowless + environment, not like the desktop. And they are often exclusively designed for touch. And we see that when we + evaluate + things, that most of the problems we find are in the area of linear access. For example, if I turn on a screen + reader + and want to traverse elements, things are not accessible. Or if I connect a keyboard, I don't have proper keyboard + accessibility. And that's simply because people think, well, this is for the mobile use, so you know, these are apps + on the smartphone. They are exclusively used by touch input. So the rest is not relevant. But the Standard says it + must be keyboard accessible. And also, if you have a screen reader turned on, then you have to have linear + accessibility, and you have to make sure that all the elements you encounter give you their proper name and give you + their role so that a blind user, for example, will know what to do with them. So that's the situation we are in. + And another important difference is that the apps are not open for coding inspection. So when we evaluate websites, + we + can turn on the developer tools, for example, and look at the source code of the page. And we can check things + there. + That's not available when we audit apps. Like in normal cases, I know that Paul has recommended as part of the app + procedure that the auditor inquires what kind of platform or what kind of developing environment has been used, and + that's certainly fine. But in our practice, when we normally audit apps, we don't have that information, simply. We + may get it, but even then it may be difficult for us to know what exactly technically needs to be done to make + things + accessible. Because there are so many different developing environments. So we don't have that openness of code. +
++ And + also, we have a much less extensive tool set, and that's also something Paul has indicated they have some ideas on + how + to support or to increase, improve the tool set for us to evaluate. We don't have, for example, the bookmarklets we + have in the website to check things. We don't have the developer tools where we can run plugins, giving us automated + tests. There's some of that, like there's an accessibility scanner at Android, and there may be some others which we + hear about, but it's a much less broad and powerful tool set we have at the moment. And that means that for testing, + we have a very strong reliance on using the screen reader to work out whether all the elements in the app have + proper + accessibility. So you turn it on, and then you can hear what accessible name is behind an element or whether it can + focus and, you know, whether it has the right role and whether you know, for example, as a blind user, what to + expect + and how it will behave. +
++ So there's also another difference that the operating system for apps is now what the browser is for the Web space. + You know, the accommodations you can make in the browser, they are not available, but there are other accommodations + you can make as a disabled user on the operating system level. And there are a number of questions with that + regarding + conformance. Is it enough if you can, for example, improve contrast of elements on the operating system level, and + then you meet the requirements? Or is the author responsible? So this whole question, what is the operating system's + responsibility, and what is the author's responsibility, we have here in this field, which we'll get back to, I + think. + So I think that's probably enough for now. +
++ Just to open with the participant questions: we had a number of questions that were already given to us before the meeting, and I'll just mention briefly the topics that we had. One is around available tool sets, and that's certainly something that maybe both Andre and Paul have something to contribute to. There's also something about testing guidance, you know, how do we know how to test, when is something okay, when is something not okay? There's a scarcity of information on that, I think. The next is, you know, there are different platform and service capabilities with apps. You may not always be able to meet the requirements of the European norm, for example, depending on how you interpret it. So how do we deal with that? Another topic that was raised, and that we will cover, I hope, is Web views. Web views means you have a native app, but within that app you have areas, views, where people just pull in information from the Web, and that often creates difficulties because you suddenly have a break in the navigation. You may not be able to focus those Web views easily or get out of them easily. They may behave quite differently or may even carry a different navigation compared to your native app. So that's an interesting question, how do we deal with that? And there was one specific question on reflow, which we may also cover. What is the requirement for reflow? That means, for example, that if you zoom in in a Web browser, you get a different design, which is also used on a mobile phone, often with a little hamburger icon for the navigation. You know, this requirement that content can reflow when you zoom in, what does that mean for a platform which doesn't have a zoom most of the time, where the magnification of text normally happens in the operating system by turning on the zoom accessibility function? So those are the four topics I wanted to briefly report.
++ Maybe we start with the tool sets, because I think several questions homed in on that. What are the ways, or are there good tool sets, to help us in this difficult task of evaluating mobile apps? Does one of you want to pick that question up?
+ +PAUL VAN WORKUM: Yeah, I would be willing to start.
+DETLEV FISCHER: Mm hmm.
+PAUL VAN WORKUM: I think there are a few levels. One is the process, like how to approach the testing. In the Netherlands, each government body is responsible for making a full report, based on WCAG EM, for each website and each app. But for apps, it's quite challenging to have a WCAG EM evaluation produce the same results across all auditing firms. That's because some firms are trying to find the URLs, and then they find one URL, which is just the App Store or Google Play Store link, and there is no way of, let's say, cutting it into pieces and making a sample, because there are no URLs available. So what we try to do with the process is identify the app itself differently than a website. You probably need the version number, because you can't download it like you can do with a website.
+Secondly, like a screen would be a good alternative for a page. So then if you have a lot of screens identified as + your scope, you are able to make a sample. Like certain things we wrote differently in the Appt Evaluation Method. + That's basically for the process. You need to have a different process to test. And we identified some key + differences. And we did it in the assignment of the Dutch Government, and it was quite a small project. But I think + it would be very interesting to see how you can do it, how you can make the kind of evaluation method compared to + WCAG EM, that a lot of companies are using and it could be the next standard. Because we see in the Netherlands + auditing firms are doing it differently because there's no one way, one evaluation method that is described in such + a way that each company does it in the same way. So that's I think it's an interesting source, and it's the best + that we've found. But I think there are so many there's a lot of things to do here still on this process.
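+
+(As a rough illustration of the process difference Paul describes, here is a minimal sketch of what an app audit scope record could capture. It is not the Appt Evaluation Method itself, and the type name, fields, and values are hypothetical.)
+
+```swift
+import Foundation
+
+// Hypothetical record of an app audit scope: the app is pinned by its store
+// listing and exact version (you cannot archive it like a web page), and the
+// sample is a list of screens rather than URLs.
+struct AppAuditScope: Codable {
+    let storeListing: URL        // App Store / Play Store URL identifying the app
+    let version: String          // exact version that was evaluated
+    let sampledScreens: [String] // screens play the role that pages play in WCAG EM
+}
+
+let scope = AppAuditScope(
+    storeListing: URL(string: "https://example.org/store/listing")!,
+    version: "3.2.1",
+    sampledScreens: ["Onboarding", "Login", "Home", "Settings", "Checkout"]
+)
+```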
++ And then, of course, you have the interpretation of WCAG or the EN standard. You already gave a few examples. And yeah, there are so many things going on there. And some things you can't test. Maybe I will give some examples. Everyone knows that on a website, the language of the page should be identified, and also the language of parts of the page should be identified. In software, and a mobile app is software, it means that the language of the software should be identified. So let's say that I have a Dutch app and I identify the language as Dutch, meaning that I am complying with the EN standard. For the second one, 3.1.2, language of parts, I could read out everything in Chinese. So when I am testing, my screen reader reads out everything wrong because it's not using the Dutch language, but at the level of the app the language is set correctly, so I am not able to test it. But I am also not able to fail them on this criterion, because they did set the language of the app correctly. This is one example.
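+
+(For context on the "language of parts" point: on iOS, for example, a screen reader language can be hinted per element. A minimal UIKit sketch, with a hypothetical label and text; on Android something similar can be done with TextView.setTextLocale.)
+
+```swift
+import UIKit
+
+// Hypothetical label inside an otherwise Dutch app that shows an English quote.
+let quoteLabel = UILabel()
+quoteLabel.text = "The quick brown fox jumps over the lazy dog"
+
+// Without this hint, VoiceOver falls back to the app or system language and
+// may read the English text with Dutch pronunciation rules.
+quoteLabel.accessibilityLanguage = "en"   // BCP 47 language tag
+```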
+I can give you another one. The second one is even funnier. We have an automated testing solution, which we are already using ourselves. What we see there is that companies are adding the role to the name. So in the name field they add, let's say, "login button": that's the name, but the role is empty. As auditing firms, if you can't go to the source code, you can't know what they programmed, so you are dependent on the screen reader. And the screen reader reads out, in both cases, "login, comma, button". And of course, sometimes I notice it's not a comma but a dot or two spaces, and I think, hmm, probably they are cheating on me. But this is what happens. Yeah. Those are, I think, the two things that, at least in the Netherlands, we are trying to figure out how to deal with. And it's not clear yet.
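+
+(A minimal UIKit sketch of the "role in the name" pattern Paul describes, with hypothetical strings; the point is that the accessible name should not contain the role, because the role belongs in the element's traits.)
+
+```swift
+import UIKit
+
+// Anti-pattern: a custom tappable view whose accessible name contains the
+// word "button" while the real role (trait) is left empty. To a listener it
+// can sound like a correctly exposed button, but an auditor relying only on
+// the screen reader cannot tell the difference.
+let fakeLogin = UIView()
+fakeLogin.isAccessibilityElement = true
+fakeLogin.accessibilityLabel = "Login button"   // role smuggled into the name
+
+// Preferred: name and role are kept separate, so assistive technologies and
+// testing tools can inspect each on its own.
+let login = UIButton(type: .system)
+login.setTitle("Login", for: .normal)
+login.accessibilityLabel = "Login"              // name only
+login.accessibilityTraits = .button             // role (implicit for UIButton)
+```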
+DETLEV FISCHER: I noticed that most resources you find... I mean, what you have mentioned, WCAG EM, that's the Web Content Accessibility Guidelines Evaluation Methodology, which was developed a number of years ago. It defines certain steps that you need to take, for example, to set up a representative sample of websites, and what you can and cannot exclude. And all of that, I think, can be used on apps in a similar way. But it does not give you anything on how you actually evaluate particular success criteria. So there's nothing in WCAG EM at all about what reflow means, what resize text means, what that means for an app. So I think at that point it becomes more interesting to see, okay, what are the particular testing processes or procedures that exist for mobile apps? My suspicion is that companies doing this may often not be willing to detail the exact method they use. So you end up with these very general statements in the European norm or in WCAG and then have to wonder and scratch your head: what does it actually mean? How do you work it out? How would you, Andre, tackle that? Are there test procedures you use for auditing mobile apps? Which ones are you using? How do you see that situation?
++ ANDRE PIMENTA FREIRE: Thanks for the question, Detlev. I think it's a very relevant issue. + How we deal with the guidelines, how we deal with the testing procedures when there's so little defined, like ground + rules, well established procedures as we have for the Web. + What I have noticed, looking from the research perspective, is that many companies have stepped in and defined their + sets of guidelines for native apps, their sets of testing procedures, so we've come across a couple of guidelines + sets and testing procedures from companies. + +
++ So the BBC have defined their set of guidelines, with a couple of indications of how to test their proposed guidelines. We followed the work of some of our colleagues, working alongside others in Brazil, to define a set of guidelines for native apps and how to test them. So many companies are stepping in and defining their own sets of procedures. And, for example, in a research study with practitioners we found that they found it easier to test some of the BBC's guidelines, which have well defined testing procedures, than to map the WCAG guidelines onto mobile apps, which still don't have a lot of the sufficient and advisory techniques attached to them, which is where you find the well defined testing procedures. So having well defined procedures to test specific guidelines helps. I think this is still an open issue; we have to work on it. But in practical terms, I think it's interesting to look around at what specific companies have done to try and approach that, while we don't have the well defined procedures.
++ And in terms of the tools, that's not something I have done research on specifically, but I have collaborated with some colleagues, and there are a lot of very interesting challenges in native mobile apps, as you mentioned. We don't have pages, so what screens should we check and evaluate when we are doing our sampling? That's very challenging to choose, and we have to find specific rules and guidance on how to do that. On the other hand, what I have noticed from some collaboration I have done with colleagues, specifically from software engineering, coming from testing, is that when they are testing native mobile apps they are employing techniques brought in from the different testing approaches they already have, and the approach is similar to, as we mentioned, the Accessibility Scanner. But in research, some researchers have already advanced on that and tried to exploit techniques that can simulate the different interactions you have with interface components. Maybe in the future, by exploring more of the accessibility APIs to dig into that information, even though we will have a lot of difficulties compared to the Web world, we could also have some advantages, by bringing in different techniques that were more challenging to employ in the Web world, and a lot of the advancements we have had in software engineering and software testing, to mobile accessibility testing. We could have some new approaches we haven't seen in automated Web evaluation tools either. So I see that it's a field with a lot of open questions: how to sample, how do we have well defined rules to test, to evaluate and to report on those issues? From the research perspective, I also see that we have a lot of good opportunities.
++ In the Brazilian context, as Paul was mentioning what's going on with the European standard using the software guidelines and trying to map them onto mobile accessibility, Brazil has been in kind of a funny situation. Our current law for accessibility in general is from 2015, and it broadened the scope to public and private organizations, whereas the previous law was very limited: it only encompassed the federal government and the executive power. It's much broader now. However, the law says that all private and public organizations should have accessible websites, which doesn't include mobile apps. So we are kind of in a gray area in the country. On the other hand, as I mentioned earlier, and I will post the link there (I am sorry, for those who want it, this is in Portuguese; I think we only have the title in English), Reinaldo Ferraz, here with us, was working very closely on that working group, and we have a very specific set of guidelines for native mobile apps. But it's only one year old; it was released at the end of 2022. And I haven't seen a lot of work in the country in terms of defining a set of guidelines to evaluate against it. As I mentioned, the law doesn't point to that standard, so there's no law enforcement activity in that sense.
++ So I think we might be in for a lot of work in terms of having specific evaluation tools and procedures for that, but we don't have them at the moment. As I mentioned, it's still very early days; it was published only a year ago, so there are many people getting acquainted with the new standard. But still, I agree with a lot of what Paul has discussed. There's a lot to do on reporting procedures, standards, and working on the automated evaluation tools. But again, we might have to look at it in a different way, looking at the opportunities we have from the different ways of working with it, but also seeing the differences we have from the Web world, and having more work that I think we are going to see in future versions of WCAG and other standards as well.
+DETLEV FISCHER: Paul, can you add to that regarding the automated opportunities? I think you + mentioned that Abra + is also working on some automated solutions. What can it do, and what are the opportunities, in your view?
+PAUL VAN WORKUM: Maybe I want to react to that as well. I see two things. One is that, in the Netherlands, I have only been working in the accessibility field for three years, and I am spending, together with the people around me, around 80% to 90% of my time on product development. So we are really digging into certain things. That's why we were able to gain so much knowledge in such a short time.
+But what I also see is that there are some experts from the Web stating to me that apps are the same as websites. And what I find really interesting is that there are a lot of experts in Web accessibility who know that you should look at all the small details. In apps, that's quite frustrating if you are looking at the details: you can't go to the source code, the test rules are not working, and so you have to interpret things. And that basically means two things. One, apps are generally shitty, so there's a lot of improvement that can be done. So let's first try to make big steps for users. Because I think that's why I am in this field: I want to help users by helping apps. And if you don't know it, think about the principles, like: is it operable for everyone, with a keyboard? And is it, like, a really big issue if it's broken? That way, I think, you can quite easily make a big step. And then, of course, in the details it gets complicated, and it also becomes a lot of discussion, because then people disagree. But I think first make big steps on name, role, value, on font sizes, on contrast, on labels, on images. I think if you do that, you already make a really big impact for a lot of users, and that's not that complicated. It's quite clear what you need to do there.
++ So start with the user, I think that's a really good approach, and fall back on the principles if you don't know the exact test rule. And it's different from the Web, because the Web is already at a level of detail where, as an expert, you can't do that anymore. I think with apps, finding the issues in order to be compliant is not the problem. Most of the time, fixing the issue is the problem. Because for fixing the issue, with not only native iOS and native Android but also different frameworks, cross platform frameworks, there are a lot of solutions, and some frameworks are not able to do certain things. So what we see is that where the Web is kind of a markup language, you can always fall back on HTML. With an app, we have a programming language, and it is what it is. And if you have an issue with your keyboard, it might mean that you should, like, start over and build a new app in a different programming language. And my question sometimes is to the government: on one side, I want it to be fully accessible. On the other side, they've invested a few million euros in a certain programming language. It's not that there's one module or one piece or one element that you have to change: you have to rebuild a new app, probably hire a completely new team of developers. So I think that's, for apps, also quite a big challenge.
++ And I think we should make accessibility a bit more fun, at least for people who are just getting in touch with it. So automated testing, that was the question from Detlev. I think automated testing is not a solution for everything, but it is a solution: you can make a big step with the basics, with a lot of developers in the teams. Because if everyone who develops something knows when the basics are going wrong, we can make a big step in accessibility. I don't want to do a big promotion, but what we are trying to do, and what we are going to do in January, I am not allowed to make deadlines for my team, but basically, we are going to launch a tool that can test the first screen of an app for free. So you can just put in the URL, and you get a report on the first screen. And if you want to test more screens, you can do it in the cloud, and it will be a very attractive...
+DETLEV FISCHER: Is that for... because you mentioned a URL, is that for the Web or for apps?
++ PAUL VAN WORKUM: We only do apps. But you add the URL of the app in the App Store or Play Store, and we will do the rest. So that's one thing. Secondly, we will have a kind of debugger or accessibility inspector, and it will be able to take all the screens apart. And you can inspect the name, the role, the value, whether elements are focusable. We are investigating if we can do better with contrast, because with the contrast check, if you make a screenshot and send it to your desktop, there's between a 1% and 10% change in the color code, meaning that if there's a contrast of 4.1, it could really be 4.5, and it could be a bust. So how can you deal with this? Can you only fail it when it's below 4.1, or only fail when it's below 5? Because then potentially it could be in the danger zone. A lot of questions. But what we can do with automated testing is find the name, role, value. We can find contrast issues, target size issues, whether labels are added, whether decorative images get focus, and text sizes. When we look at apps and we see an app that didn't do anything, I think out of 100 issues, 80 are name, role, value, text sizes, contrast, stuff like that. So you can make a really big step using this automated testing. Of course, if an organization is further along, then the issues get more complicated, and then you can find maybe 10% or 20% of the amount of issues. So it's not fair to say that we can find 80%; I did not say that. But we can find quite a lot of the issues that are occurring in apps.
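+
+(The 4.1 versus 4.5 figures refer to the WCAG contrast ratio. As a reference, here is a small self-contained sketch of that calculation; the sample colours are made up, and this says nothing about how any particular tool extracts colours from a screenshot.)
+
+```swift
+import Foundation
+
+// WCAG 2.x relative luminance of an sRGB colour (components in 0...255).
+func relativeLuminance(_ r: Double, _ g: Double, _ b: Double) -> Double {
+    func linear(_ c: Double) -> Double {
+        let s = c / 255.0
+        return s <= 0.03928 ? s / 12.92 : pow((s + 0.055) / 1.055, 2.4)
+    }
+    return 0.2126 * linear(r) + 0.7152 * linear(g) + 0.0722 * linear(b)
+}
+
+// Contrast ratio between two luminances, in the range 1...21.
+func contrastRatio(_ l1: Double, _ l2: Double) -> Double {
+    (max(l1, l2) + 0.05) / (min(l1, l2) + 0.05)
+}
+
+// Mid-grey (#767676) on white measures about 4.5:1; a small colour shift
+// introduced by taking a screenshot can move a measurement across the
+// 4.5:1 threshold, which is exactly the uncertainty described above.
+let grey = relativeLuminance(118, 118, 118)
+let white = relativeLuminance(255, 255, 255)
+print(contrastRatio(grey, white))   // ≈ 4.54
+```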
++ DETLEV FISCHER: That would be a useful tool to have, and it would probably make testing those things easier. Just to get back to the question of how to apply certain WCAG criteria, I would like to give an example and lead to a question, which is about the responsibility of the author versus the responsibility of the operating system and the settings the user can make there. For example, text size. Right? On the Web, you have ways of increasing the text size, most commonly by zooming in. You zoom into your browser, the text gets bigger, you have a reflow, and at some point it's just one column of text. And you can then check, okay, does it increase text to 200%? In apps, that usually doesn't exist. Of course, it's possible that the app has something like an increased text size setting, but most apps don't have that. And the common understanding of most practitioners evaluating apps regarding text size is: well, we actually don't need to look at that at all, because there's a zoom function in the accessibility settings. So you can just turn on zoom, and then you can zoom up to 500% or even larger by now; 500% I think is the latest I remember. And that will give you large enough text. But obviously, that also means that you don't reflow, so you get larger text, but you have to pan your screen horizontally to be able to read the lines. Right? Because you don't have a reflow, you have to do that. So reading gets a lot more difficult for people with visual impairments, because they have to pan all the time in order to read the text if it does not reflow.
++ So the upshot is: how do you decide, as an evaluator, whether that is good enough, as many people say, or whether you also check that the text in the app changes its size if you change settings in the operating system? You can increase the text size in your operating system accessibility settings; you can say "large text", and if it is implemented well, then the text in your app should get larger. Since you can do this as an app developer, is this something that you also require, and say that if you don't do it, this point fails? Or is it something on top, where you say, well, this passes anyway because we have the zoom function at the operating system level? That's my question to you. I mean, how would you deal with those questions in evaluation? What's your line on the author's responsibilities versus operating system level capabilities? Does anyone want to pick up that question? Paul?
+PAUL VAN WORKUM: Yeah, I can give a reaction. I think on my bookshelf there is a dictionary. And the question is: because I have this dictionary, is it fine, then, to use very complicated words? It's the same with AI. With AI there is maybe the possibility to get feedback on how the screen is set up and what kind of icons are being used. And is that enough? That's the same as with the dictionary on my shelf: is it enough? And that's the thing. What we see from the guidelines, what we see for the Web, is that if you have a high contrast mode or big font size possibilities, then of course, if I turn the high font size on, my letters are big enough that you don't need to meet certain contrast criteria anymore. I think it's a discussion that you don't want to start.
++ DETLEV FISCHER: I started it. + (Laughter) +
++ PAUL VAN WORKUM: Yes, you have. I will give you my answer. I think that an app, without changing the settings for high contrast mode or bigger font size, should be working for, like, the average user. And the users that need a bigger font size should be able, in the system settings, to put it at least to 200%, and then all the text in the app should scale. Not necessarily scale to 200%, because in our automated testing tool we found out that the body text scales to 200%, but text that is already big, at the highest levels, maybe only scales to 150%. So this is also like: text should scale to 200%, but then in the settings you should probably put it at 300% or 350%, and we see that headings only scale to 190%, so it's untestable. What we do is set it at 200%, and all text should be visible; each letter should be inside the box of the buttons. Meaning that you should check whether text scales, yes or no, and if it scales, whether all letters are visible. That's basically the simplification. But with apps, in the title bar, in the tab bar, if you scale the text, you get issues again. But then there is a solution with long press, so that you have an alternative where you press it a bit longer and then it's shown as well. I think that's what you should do as a developer for large text. And also with the contrast, I think that every user that is using your app, maybe seeing really well, if you walk outside and it's sunny, you still need a certain amount of contrast. And you don't want to go to your settings to make it a little bit bigger, a little bit more contrast. No. By default, your app should be usable for users. So that's why we do it in this way. But maybe, Andre, you do it differently. I am very curious.
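+
+(On iOS, the system-level font size setting Paul refers to reaches an app through Dynamic Type. A minimal UIKit sketch of the developer side of that, assuming a plain label; this is not a test procedure, just the mechanism that makes text follow the user's size setting.)
+
+```swift
+import UIKit
+
+let label = UILabel()
+
+// Scale the font from the user's preferred content size category, so the
+// label follows the text size chosen in the system accessibility settings.
+let baseFont = UIFont.systemFont(ofSize: 17, weight: .regular)
+label.font = UIFontMetrics(forTextStyle: .body).scaledFont(for: baseFont)
+label.adjustsFontForContentSizeCategory = true
+
+// Let text wrap instead of truncating when it grows, so every letter stays
+// inside its container, as described above.
+label.numberOfLines = 0
+```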
+ANDRE PIMENTA FREIRE: I totally agree with you, Paul. And from another perspective, when we look into what kind of devices people use, not everyone can afford... let's take an example. In a lot of research we've seen the iPhone. We've seen that a lot, and many people have asked, in many research studies, why have you not invited iPhone users to your usability studies on accessibility? And we have actually done a few of them, because we have very few iPhone users in countries like Brazil. Some research studies have shown that more than 85% of the people surveyed used Android phones, and even within the Android world you have a lot of variability in the type of devices people use and the devices' capabilities, models, et cetera. So relying on the device may be very tricky. So I totally agree with you, Paul: we should definitely try and look to provide the resources people would need, even if they have devices that wouldn't provide more features to do that on their own.
+As Paul mentioned, there could be people who don't use assistive technologies every day, maybe because they are outside and it's sunny. Or even, as we've seen particularly here, we have a recent case at our university of an older student who is gradually losing his sight. He can still see very little, but he was not used to using assistive technologies from an early age, so now he is struggling to cope with different settings and to learn different things. So if he has to do a lot of work in terms of learning how to use assistive technologies on his mobile phone, that's not easy. So I take from this example that if we can provide this, especially if it's covered in standards that are directed by regulation, I don't see why not to do it. I think it's good that we have devices that provide good resources, that have good assistive technologies, but they are not always available, and people are not always able to use them as we would think. So I totally agree with that, Paul.
+DETLEV FISCHER: Yeah, I think it makes a clear case for needing more specific advice on how to apply the success criteria or EN requirements. For example, for resize text, we mentioned there are different ways of doing it: there's zoom, and there are also the accessibility settings for larger text. But the requirement does not differentiate between types of text; it does not say anything about text that's already large. It also does not say anything about, say, labels in a tab bar, which could not grow by the same amount because they would break up or would need to be truncated. So those issues exist. And if you apply WCAG requirements by the letter at the moment, which do not differentiate between types of text and just say everything has to be 200% larger, then you end up with recommendations for developers which may not be useful, which may actually make things worse. You know? And the thing you mentioned, Paul, that you could have a pop-up text which is larger, is a nice way out. It's not really something that, I think, has been foreseen in this, and it would not really be clear whether that would meet the requirements, because it requires an extra step to bring up that enlarged text. But it's certainly something that would be more, you know, more germane to the app environment. And there are many cases...
+PAUL VAN WORKUM: Yeah, I think the problem is that if you say text should be enlarged in the tab bar, what you are saying is that then you get dots and you can't see it as well. The best alternative, what we see as well, is if you look at landscape mode: it's quite often used in combination with larger font size, because then you have rows with a lot of words instead of only reading, like, two words in each row; it's very tiring to read that way. Yeah, what we see is that we cannot fail on bigger font size if the solution breaks up other things as well. You cannot say to the developer, in my opinion: you should do this, but if you do this, I will fail you on something else. So best practice, long press implemented; but in audits, are you able to fail on this? Because if you fail on it, if you have five tabs, sometimes it gets three or four lines high, meaning that you don't have space for your content anymore. Or if you have space, you basically can't have any overview anymore, especially with bigger font size on. Yeah.
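+
+(The long-press alternative mentioned here corresponds, on iOS, to the Large Content Viewer. Standard tab bar and navigation bar items get it automatically; a custom bar control can opt in roughly as in this sketch, where the control and its title are hypothetical.)
+
+```swift
+import UIKit
+
+// Hypothetical custom bar control; built-in UITabBarItem / UIBarButtonItem
+// already support the Large Content Viewer out of the box.
+let settingsButton = UIButton(type: .system)
+settingsButton.setImage(UIImage(systemName: "gear"), for: .normal)
+
+// At accessibility text sizes, a long press now shows the title and icon
+// full-screen instead of forcing the bar itself to grow several lines high.
+settingsButton.showsLargeContentViewer = true
+settingsButton.largeContentTitle = "Settings"
+settingsButton.largeContentImage = UIImage(systemName: "gear")
+settingsButton.addInteraction(UILargeContentViewerInteraction())
+```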
+I maybe have another one. It's like with lists. I think it's one of the questions from users as well that, basically, if you are in an app and you present a list with dots, like bullets, it doesn't read out the same way as on a website, meaning like "it's a list, one out of four" or "one out of five points". One of my cofounders made the Dutch COVID app accessible; he was the lead accessibility developer there. And there was an auditing firm that did a lot of Web audits stating that the list was a fail. And because it was very important for the Dutch government to make the app fully comply, he built something so that it was read out as a list, one out of four. But it means that if you have it in 15 languages, it's a lot of translation and strings, and it took a really long development time. At a certain moment, the auditing firms in the Netherlands said: if you can separately tap each item, it's also good enough. So we are organized in an inspection group, and we agreed that we deal with lists in this way.
+DETLEV FISCHER: Yeah.
+PAUL VAN WORKUM: But now we go to the European Union, and in other countries they do it differently. That's, I think, something we should do something about: on a European level or, I don't know, it should be that everyone does it the same way, because otherwise it's unfair that some countries, or some auditing firms, do. And we have a lot of discussions in the Netherlands where some auditing firms fail apps on the keyboard controls or on the focus indicator not having enough contrast, and others do not, because they say it's standard, we can't fail on this.
++ DETLEV FISCHER: Yeah, it's also there's a lot of leeway and a lot of wiggle room in terms of + interpreting the + requirements. You know? So you can arrive at different results. + We have a number of questions from the audience. Maybe I should pick some and throw them to you and see whether you + want to answer them. + One is what is your setup for mobile testing? Do you use multiple phones or a mobile emulator? Does one of you want + to answer that? Do you use several devices? +
+ANDRE PIMENTA FREIRE: We've done a couple of studies with different devices, but I mean, it's very + hard to have + all sorts of different settings. So I mean, in many situations we've seen, and from what we heard from a couple of + developers and testers, people tend to use emulators to have different settings, sizes, different OS versions, so I + think emulators can really come in handy.
+DETLEV FISCHER: Okay. And there's another question, which is: do you know of tools that emulate apps on a desktop, so that ZoomText users can test apps more easily?
+PAUL VAN WORKUM: I don't use emulators at all. When I do an audit, I use one device, and I test only Android or only iOS. And I report, for each issue, on which screen it is; I add the description of the problem; I add a screenshot, because we see that developers can then identify very fast what the issue is. And we add the criterion, meaning that if you link it to the Appt platform, there's also the code base where you can find the solution to the problem. So you can see a heading, and with the screen reader with subtitles on, it says "log in" and not "log in, comma, heading"; we say this is wrong, it's 1.3.1, look at the site for a solution. So I can normally, in a meeting of one hour, describe around 60 issues, because it's like: look at the image, this is the heading. Next, is this a button? Insufficient contrast. Okay. Next. That, I think, is a very important combination, to have that visual information there. Especially with apps, because with a website you can download the website and, like, present it later. But with an app, if you update the version, you can't reach the old version anymore, and basically it could be an issue that only occurred after an update, so you never know anymore whether you did it well or not.
+DETLEV FISCHER: Yeah, well, I think, in my experience, there are differences between devices, + especially with + Android tests, so if you test something on a Pixel phone and you test something on a Samsung phone or some tablet, + you may get different readouts on certain items. So there are differences, and it may be useful to cover those. But + that's, in our experience, also down to the customer who wants to say, you know, please test this also on this + device, on this Android skin, for example, because this is one which we know has a large market share. And we want + this covered. + But regarding sharing the screen, we have done internally, we have done quite a few app tests where the blind tester + and the sighted assistant have been working together and sharing the screen via Zoom. So the blind tester shares his + screen, and at the same time, the assistant has the same app open on their device so they can also use it + independently to verify what the blind tester does. So that has turned out to be quite useful. And also, the blind + tester can share the screen reader output, so the assistant can also hear what's being output. So that is a setting + which has been quite useful. But it is time consuming. So be warned if you do that kind of testing, it is quite time + consuming. + I don't know how we are on time. We are now 16:25. Do we have more time, or are we... +
+CARLOS DUARTE: No.
+DETLEV FISCHER: Okay, then. We have many, many questions. I am very sorry that we haven't covered more of them. There are many questions, and I hope I can answer some, or maybe the other participants in the panel can answer some of them, in the Question and Answer panel. And thank you very, very much for your insights. I think it's an extremely useful discussion, and there's so much more. We have just scratched the surface, and there's a lot more to talk about, but this is all we could squeeze in. So I hope it was useful for you.
++ CARLOS DUARTE: Thank you so much, Detlev, Paul, and Andre. It was definitely really interesting. There are still a lot of open questions there in the Q&A, so if some of you can tackle those, it will be very good for everyone involved. It was really insightful and full of actionable material, I would say, so thank you so much for your contribution. And now let's have another ten minute break, and we'll be back at 16:35, so 5 minutes past the bottom of the hour, for our final session on Artificial Intelligence for Accessibility Evaluation. So see you in ten minutes.
+{% include excol.html type="end" %} + + +## Session 3: Artificial Intelligence for Accessibility Evaluation +- Moderator: Matthew Atkinson (Samsung R&D Institute, UK) +- Yeliz Yesilada (Middle East Technical University, TR) +- Alain Vagner (SIP, LU) + +{% include excol.html type="start" id="session3-transcription" %} +### Transcript of Session 3: Artificial Intelligence for Accessibility Evaluation +{:#session3-transcript} + +{% include excol.html type="middle" %} ++ CARLOS DUARTE: Okay. So I think we are ready to start our last panel. The topic for this panel + session will be AI + for Accessibility Evaluation, and it's going to be moderated by Matthew Atkinson from Samsung R&D, in the UK. And + our participants will be Yeliz from the Middle East Technical University in Turkiye and Alain from SIP in + Luxembourg. + Once again just a quick reminder for any attendee that has joined in the meantime, we are using Q&A for posing + questions that you might have to the panelists or to the people in the session. And we are using chat to share any + resources linked to topics being discussed or for any technical issues that you might have. + So Matthew, you can take it over. +
++ MATTHEW ATKINSON: Hi, everyone. Let me just juggle my windows slightly, first of all. Just one second. Okay. So we're very excited to have this chat. It's a privilege to be here, and I welcome everyone. Thanks for your attendance, and thanks to the insightful panels that have gone before us, where this topic has actually come up; we'll try and give you our take on those questions. So, how this is going to work: we are each going to introduce ourselves and speak for a couple of minutes just to set out our experiences. And you will see there's a lot in common between the three of us in terms of threads, parallel threads. So we'll do that, and then we'll move into general topics of discussion. Of course, there are some questions we already got from the audience, which we've looked at, and as we can, we will answer things that come up during the session.
+So I'll begin. Hello again. I'm Matthew. I am Head of Web Standards at Samsung R&D Institute, UK. However, just to be clear, I am not here representing Samsung. I am also co-chair of the W3C's Accessible Platform Architectures Working Group, which I will call APA from now on. One of our main jobs is to review W3C's specifications for accessibility, but we also do original research of our own. And whilst I am not speaking on behalf of APA either, we do a lot of research in this area, particularly in our Research Question Task Force; we have a lot of experts in that task force that look at the trends in this area. So I will relay some of my experience and some of theirs. What follows is my personal opinion based on experience: some experience of accessibility auditing and a little of academia as well.
++ So one thing I wanted to do first of all is just distinguish between AI or machine learning and some of the current + automated evaluation that we can do. As other people have mentioned, actually, in previous panels, there are + automated accessibility evaluation tools, and they just use standard sort of heuristics. And they can capture around + 30% of the sorts of problems that the Web Content Accessibility Guidelines, or WCAG, identifies. So they don't + capture the majority of the problems, but they can give you a good barometer, a rough estimate of accessibility, and + they can be run in an automated way. But they don't use machine learning. So we are not talking about those. We are + talking about more recent developments. + And on machine learning, you'll notice that we'll talk about risks and opportunities, and we'll also talk about + mitigations. And I am just going to highlight one or two of each of those just now. And we'll revisit these as we go + through. +
++ So there's a concept from the literature called "burden shifting" or "shifting the burden." And a good example of + this is, for example, automated captions that are generated on, say, videos, streaming videos. And whilst they can + be useful, they are not necessarily 100% reliable, or they might be very reliable, but they are not 100%. And there + are some risks presented by that because if you are somebody who can't hear what's being said in the video and you + are relying on the captions to be accurate, then the burden for verifying the accuracy of the captions has been + shifted onto the person who is least able to do so. So that's one of the big risks. And there are others that we'll + talk about as well. Alain has some great examples of those. + There are some opportunities, though, because there are some things that machines can do better than humans, and + with some guidance, could present great opportunities. And Yeliz has some really good research that she'll share + with you on that front when it comes to accessibility evaluation. +
++ And in terms of mitigations, I just wanted to put two links in, which I will copy into the chat whilst I'm talking, + and these are two W3C efforts trying to help in this area. So I am just going to paste these in, and I will tell you + what they are. There's the W3C's Principles of Ethical Machine Learning, which is an official W3C deliverable, which + is being worked on. And then there is also a community group, which isn't official W3C work, but it's something that + is being incubated. This community group is called Accessibility at the Edge, and one of the things they are trying + to do is gather consensus on where we can and where we might not find machine learning to be helpful. So anybody can + join that community group. You don't need to be a member of W3C in the sense of being a paid member or a member + organization. You only need a free W3C account. So we welcome feedback on these efforts. + Okay. So that's definitely enough from me. So I will hand over, first of all, to Yeliz to give your sort of + introductory statement. +
++ YELIZ YESILADA: Hello, everybody. Good afternoon. Thank you, Matthew. First of all, thanks for inviting me here. It's been great. It's really great to see the first two sessions; I really enjoyed them myself, especially the mobile accessibility one. I remember it was in 2009 that we actually created that document in the Education and Outreach Working Group talking about the common experiences between mobile users and disabled users. So it's really interesting to see the discussions and how they have evolved. Let me introduce myself. I've been in academia for more than 20 years; it's been quite some time. I mainly do research on Web accessibility. And recently, actually in the last five years, my research has mainly focused on using AI to improve accessibility for disabled users. I do research in eye tracking and human computer interaction as well, so we also try to use AI in, let's say, eye tracking research, and look at how we can actually use AI for eye tracking.
++ The recent research that Matthew mentioned: we've been actually looking at, as we already discussed in the previous session, especially the importance of WCAG EM, the evaluation methodology. It is a great resource for, of course, systematically evaluating websites; in this case, I am using the definition of websites broadly. But there are a lot of subjective elements. I guess Paul mentioned it in the previous session: for example, even if we take one website, two auditors can reach different conclusions. One of the reasons for this is basically that WCAG EM has different stages, and one of the stages, for example, is defining the evaluation scope, what you consider, for example, as a website. Then exploring the target website, so deciding, for example, which pages you need to sample from the site, which pages to consider. That becomes a complex and subjective task. And we actually propose in our research a parallel methodology, we call it "Optimal EM", where we try to explore mainly machine learning approaches for doing a bit more systematic sampling. In the chat, I added the two recent papers that we published on this.
+So what we try to do, first of all, is establish a population for a website, what the population is, because you need to decide, for example, what pages are there, which ones are used, which ones are not used, et cetera. And then we try to cluster the pages that are available on the site by using unsupervised approaches, mainly based on statistical techniques, and we try to generate a representative sample. But of course, generating a representative sample for a site is not that straightforward, because you need to consider, for example: do we actually have enough coverage? Do we cover different pages? Do we cover, for example, the freshness of the pages? Do we cover the complexity, the variety of complexity, of the pages, et cetera?
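+
+(The papers linked in the chat describe the actual approach; as a toy illustration of the general idea of sampling in feature space rather than by hand, here is a sketch that reduces each page to a made-up feature vector and greedily picks pages that are far apart. This farthest-point heuristic stands in for the unsupervised clustering mentioned above and is not the published method.)
+
+```swift
+import Foundation
+
+// Toy page description: a URL plus a feature vector, e.g. element count,
+// number of form fields, number of images, depth in the site structure.
+struct Page {
+    let url: String
+    let features: [Double]
+}
+
+func distance(_ a: [Double], _ b: [Double]) -> Double {
+    var sum = 0.0
+    for (x, y) in zip(a, b) { sum += (x - y) * (x - y) }
+    return sum.squareRoot()
+}
+
+// Greedy farthest-point sampling: start anywhere, then repeatedly add the
+// page that is least well represented by what has been picked so far.
+func representativeSample(from pages: [Page], count k: Int) -> [Page] {
+    guard let first = pages.first, k > 0 else { return [] }
+    var sample = [first]
+    while sample.count < min(k, pages.count) {
+        let next = pages.max { lhs, rhs in
+            let dLhs = sample.map { distance($0.features, lhs.features) }.min() ?? 0
+            let dRhs = sample.map { distance($0.features, rhs.features) }.min() ?? 0
+            return dLhs < dRhs
+        }!
+        sample.append(next)
+    }
+    return sample
+}
+```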
++ So we also introduce, for example, different metrics that can be used to assess whether you are doing good sampling + or not. This is basically trying to use unsupervised learning approaches to actually do sampling to help to choose, + to guide what kind of pages you take from a site. And then you sample and you do the evaluation. + In my research, I also I am also quite interested in complex structures. For example, tables are complex structures. + How do we evaluate the accessibility of those complex structures? Because those kind of complex structures, for + example, they are used for, let's say, not just for representing data, but they are also represented for basically + visualizing or laying out the content of the page. So we also try to use, for example, supervised approaches, + algorithms where they try to look at data and learn from differentiating from that data, learn to differentiate, for + example, where tables are used for layout or they are used for structuring purposes. +
++ In general, just to set out the overview, these are examples from my research. But I believe AI can help in two ways. AI is not going to be a silver bullet, of course, so it's not going to solve all the problems. Matthew mentioned, for example, that about 30% of issues can already be identified automatically. But for the remaining 70%, if AI can help us and automate certain processes, that would be great.
So it can be useful in two ways: for testing, and also for helping and guiding the authors, or maybe we can call it repairing the accessibility issues.
For testing purposes, I see certain areas with potential. For example, language models can be used to assess the complexity of the text or the layout. We can also assess, for example, whether generated alternative text is appropriate or not for certain kinds of elements. That can also be an area where automation can be done.
Images, whether they are used for decorative or for semantic purposes: again, AI can help there in differentiating them.
Page elements: I've been doing research on that for a long time. It's a complex task to take a page and decide what the page elements and their roles are. But of course, machine learning can also help there.
++ But there are also AI approaches that can help at the authoring stage, in my opinion. For example, generating alt text: we see a lot of research on that, especially in image recognition and machine vision. Or automating caption generation, so AI can also help with automated captions. Or text translation, because we see that multiple languages can be an issue, so AI models can be useful for automated text translation.
I guess we will discuss these kinds of examples. But besides the evaluation and the support for authoring, there are also tangent issues to these approaches that we have to be careful about, as Matthew already mentioned. For example, these algorithms rely on a lot of data, and on good quality data, so it's critical that we have data and that it is of good quality. It's also important that we avoid bias: we should not have a bias towards certain user groups or certain disabilities. We should not exclude users, so the ethical dimension is critical there. And the accuracy and reliability of these approaches are also critical: how successful they are, or how accurately they can actually help us. Of course, they cannot solve the full problem, but they can at least assist, help, and guide in the process. So these are the issues that I wanted to mention as the tangent issues.
+Matthew, I think that's all I wanted to say. I guess we'll discuss them later again.
++ MATTHEW ATKINSON: Yes, yeah, lots to discuss, lots of common threads. Thank you for that, Yeliz. + And now over to Alain for your introduction. +
++ ALAIN VAGNER: Thank you, Matthew. Yes, I will just briefly present myself. I am an accessibility specialist at the Information and Press Service of the Government in Luxembourg, a small country in Europe. I am also a member of the committee developing the European norm, via CEN and CENELEC. My background is in human-computer interaction, where I worked for several years, and I have also been a software engineer and product manager.
At the Information and Press Service of the Luxembourgish Government, I am part of a small team in charge of several topics, like administrative transparency, open data, freedom of information, and also digital accessibility. More precisely, we are the organization in charge of monitoring the accessibility of public sector websites and mobile applications, in the framework of the European Web Accessibility Directive.
++ So there are similar organizations doing the same job all across Europe, in all EU Member States. We are also in charge of awareness and training of public servants on digital accessibility, and we monitor complaints coming from end users. For each complaint, we act as a mediator between the end users and the public administrations.
Regarding the monitoring, we are conducting more than 100 audits per year. It may seem like few, but we are also a small country, so that's why. All our audit reports and all the data we produce during this monitoring are published online under an open license on the National Open Data Network, and they may be used, for example, to train an AI model. I don't know if this is quality data, but it is some kind of readable data for sure.
+I also wanted to mention that I am not an AI specialist, but I am interested in the topic and in all tools and technologies which could help us improve the performance of our audits. That's one thing. And I also wanted to mention that, personally, I am quite aligned with Yeliz when she said that AI is maybe not a silver bullet. I don't think that AI can solve all accessibility issues; we must find the right tool for the right problem. That was it for me. Thanks.
++ MATTHEW ATKINSON: Thank you very much, Alain. So lots of stuff to talk about. + First of all, one of the things that we just discussed on the risks side of things was bias and avoiding or trying + to avoid excluding certain user groups. And Alain, you actually have a really good example of this, involving + language because of population size. So would you like to tell us about that? +
+ALAIN VAGNER: Yes, so regarding the languages: Luxembourg is a very small country, but we have several languages here. The national languages are German, French, and Luxembourgish, which is a separate language. In the population, 75% of people speak more than one language at work, and 50% speak between two and three languages at work. So the multilingual aspect in Luxembourg is very important, and this is also reflected on our websites: all the official public sector websites need to be in multiple languages. We also have lots of issues with mixed languages on the websites. As people are really used to speaking multiple languages, it's not uncommon to see a chunk of text whose language is different from the main language of the website, for example. This is really common, but it needs to be appropriately tagged with a lang attribute in HTML, so that screen readers read it with the right speech synthesis. That's the first one. We also have some issues with videos. We are trying to have subtitles and transcripts for all our videos, and there is no automatic captioning available for small languages. We have 400,000 speakers of Luxembourgish, and the big platforms, big tech, are not really supporting these small languages. So if you have, for example, a video in French or in German, you will have automatic subtitles on YouTube, but if someone is speaking Luxembourgish, or worse, if somebody is speaking multiple languages in the same video, then you are alone, and you have to subtitle it yourself. So this can be more costly to produce. Here we also have some ongoing projects using AI on this topic, like a project related to a speech-to-text engine and a tool for the automatic transcription of videos. We are not there yet, but we are working in this direction. This is one point regarding the languages.
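The mixed-language problem Alain mentions, untagged chunks of text in a different language than the page, is one of the few cases where a simple automated check is plausible. The sketch below is illustrative only: the element selection and length threshold are assumptions, and off-the-shelf detectors typically do not support Luxembourgish, which is exactly the gap he describes.

```python
# Heuristic sketch: flag text blocks whose detected language differs from the
# declared lang attribute. Simplified: only the element's own lang attribute
# or the page-level lang is considered, not intermediate ancestors.
from bs4 import BeautifulSoup
from langdetect import detect

def find_language_mismatches(html):
    soup = BeautifulSoup(html, "html.parser")
    page_lang = (soup.html.get("lang") or "").split("-")[0] if soup.html else ""
    issues = []
    for el in soup.find_all(["p", "li", "blockquote"]):
        text = el.get_text(" ", strip=True)
        if len(text) < 40:  # too short for reliable detection
            continue
        declared = (el.get("lang") or page_lang).split("-")[0]
        try:
            detected = detect(text)
        except Exception:  # detection can fail on ambiguous text
            continue
        if declared and detected != declared:
            issues.append({"declared": declared, "detected": detected, "text": text[:60]})
    return issues
```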
+And another point is the complexity of the languages. Because if you are in a multilingual context, you cannot assume that everyone is totally fluent in all the official languages. This also has an impact on accessibility because, for the Deaf community, as you know, people who are born deaf have more problems acquiring languages, and maintaining context is also an issue. So we should also work on easy-to-read documents and easy-to-read pages, which can help people with cognitive disabilities but also the Deaf community. On our side, the Deaf community is mainly German speaking, so we are working mainly on the (speaking native language), meaning easy-to-read pages on our websites.
+MATTHEW ATKINSON: Thank you very much. I think there are some really good real-world examples there of the implications of the sizes of data sets and those kinds of issues. And the example of captions has come up quite a bit. It's a good example because it allows us to introduce the concept of at which time we use a machine learning or AI kind of approach. In that captioning example, although it's not directly related to evaluation (we will bring it back to that shortly), the captioning example shows us that at one time, authoring time, helping a person make the captions could really speed them up. Now, of course, right now we are benefitting from human captioners, which is the best you can get and is fantastic. But not everybody is able to support that. So authoring time allows a human the option of correcting the mistakes that they know are there. Runtime does not. So that's a difference in implications because of the time at which you employ the tool.
++ And talking about accessibility evaluation, doing things at sampling time as opposed to audit time: perhaps there are very similar implications there. The statistical models for looking at large sites: speaking as somebody who has had to do sampling in the past, I would really appreciate being guided by those tools. I would perhaps be less confident in machine learning's ability to pick up all the issues, maybe even a fraction of the issues, on the real accessibility testing side of things, for reasons that Yeliz has mentioned and that were also discussed previously around the issue of context.
So again, guidance, supporting scalability by having the tool guide the human and using it as a tool, more on the authoring-time end of the spectrum rather than the runtime end, in my view at least, could result in more reliable and, therefore, fairer usage.
+So Yeliz, you already introduced us to Optimal-EM to some degree, and you also talked about ways that the tools + could be used, machine learning could be used. For example, at authoring time to provide alt text. Could you tell us + anything about this issue of context? And I think you touched upon it with the tables where the machine learning + system has to do some interpretation and what sort of risks might arise from that and where there might be some + opportunities.
++ YELIZ YESILADA: Of course, identifying context is a big challenge, I think. For an evaluator, it's also a big challenge, considering different contexts for the evaluation. But for certain complex structures, by having relevant data, certain algorithms can be generated to guide the authoring stage, as you mentioned, Matthew.
Of course, authoring and evaluation are all intertwined, because if things are corrected at the authoring stage, then it's going to be easier to do the evaluation, and it's going to be easier to test them at the evaluation stage.
So while the author is, let's say, authoring and generating certain structures, AI can help there to identify, for example, that a certain structure is being used not for presenting data but for laying out content, and that it should not have been used that way because it's causing problems for screen reader users. That would be a great help, as you mentioned, at the authoring stage.
But identifying the context is a big challenge, and it will also be algorithmically challenging for AI algorithms, I think. So it's not going to be a straightforward issue.
++ MATTHEW ATKINSON: Indeed. So shifting gears slightly, I'm not sure if we've had any questions. There are certain other topics we'd like to consider. And just a note that you will see me using an assistive technology here, which is called the Manual Zoom, so that I can check for questions every so often.
But one of the things that might be useful is, Alain, you had set out a series of requirements that you would have for considering using AI or machine learning technology. So would you like to tell us a bit more about those?
++ ALAIN VAGNER: Yes, no problem. Yeah. As a public sector organization, we have, of course, a few requirements regarding AI.
I would say the first one is transparency, because in the public sector we need transparency. And for AI tools, we need to know, for example, how they have been trained and where the data is coming from, because it will also help us with the questions regarding biases. Biases are a frequent issue in the AI field, and we absolutely want to avoid this: for example, being more precise, more performant, on one type of handicap and less on another, that is something we would absolutely like to avoid.
And for example, if we had some kind of AI trained on all our reports, we could maybe find some issues automatically. But for the edge cases, where we have less training data, we would have less precision. And those edge cases, more often than not, are the issues where we spend lots of time as auditors. So this is something that may be a bit tricky.
++ I would also like to mention accountability, because we need to be able to explain a decision. How can we do that if we have just a black box? So this may be an issue with some models. Related to this is the idea that an AI or an algorithm cannot alone be made accountable for a mistake. We cannot use AI to exonerate us from our responsibilities towards persons with disabilities.
There were also questions about metrics, and I think Yeliz already mentioned this a little bit. For us, we would like to know how to evaluate the efficiency of an automated tool, an AI tool. Two basic metrics I see are the detection rate and the false positive rate; these are the two which are really important for us. The tool should be able to detect an issue if there is one, and also avoid saying there is an issue if there is none.
+So yes, that's it, I would say. More globally, maybe at a more abstract or political level, when introducing new AI tools we should avoid the risk of the disability dongle, a concept introduced by Liz Jackson. It means that from time to time we encounter AI technologies that have been developed without including people with disabilities, and they don't really answer the needs of people with disabilities. So this should also, to my mind, be included in our requirements.
++ MATTHEW ATKINSON: Yes. On that point specifically, I am not actually sure if this is in the Ethical Machine Learning Principles themselves, but one of the things that was discussed around their development (and they are still in development; like most W3C things, feedback is welcome) was the idea that when a system makes a decision about a particular user or a constituency of users, those users need to be involved in the design of the system if it's going to be making decisions about them. And that feels to me like a related thing.
And you mentioned metrics, false positives and detection rates. And Yeliz was talking earlier about the work with Optimal-EM and getting to the stage where it could do unsupervised work. Could you say a bit more about false positives and detection rates that you've come across in research, Yeliz?
++ YELIZ YESILADA: Do you mean the metrics that are available, or metrics in general for the sampling work? Because for the sampling work, with WCAG EM and in our research, we realized that we don't really have metrics to decide. For example, WCAG EM says you should explore the site and pick certain pages that represent the functionality. But these are really subjective definitions, because you can pick a page representing a functionality of the website, let's say, but it is very outdated. So does that mean you covered the functionality or not?
So in our work, we tried to come up with metrics that can really assess whether you are doing good sampling or bad sampling. The metrics that we introduced include, for example, coverage. Let's say you pick certain pages: how much of the whole site are you covering? What's the population that you are covering? In fact, we draw similarities with the census work that governments do. If you have a population and you want to do a survey, you need to make sure the survey is done with a sample that is representative and has full coverage of the population. So we try to use those kinds of metrics. Besides coverage and representativeness, we also introduced, for example, the idea of freshness. If you are going to sample pages, your pages should be fresh pages, pages that people are using.
Let me give you an example from the COVID-19 period. In that period, certain pages related to COVID-19 were very critical for the population. So if auditors are picking pages, let's say they are auditing a site, but they are not including those pages, they are missing critical, fresh pages that lots of people are visiting. So we also introduced freshness. We introduced complexity, for example, because when auditors pick pages, they might pick pages that are simple to evaluate and avoid the complex ones. Of course, the question there is what we mean by complexity, because complexity can be technical complexity or visual complexity, so you can have different kinds of definitions. But we think that for sampling, that should also be a criterion: when you are picking pages for evaluation, you should not only pick pages that are easy to evaluate technically, but should really think of the recent technologies that are used, like dynamic content, which we know is challenging to evaluate. So whether pages include dynamic content is another metric we consider.
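The published Optimal-EM metrics are defined in the papers Yeliz shared; as a rough illustration only, coverage and freshness for a chosen sample could be scored along these lines (the field names and formulas here are hypothetical, not the published definitions):

```python
# Illustrative sample-quality metrics: coverage (how many clusters of the
# population the sample touches) and freshness (how recently sampled pages
# were visited). Inputs are plain dicts keyed by page URL.
from statistics import mean

def coverage(sample, population, cluster_of):
    """Fraction of the population's clusters represented by at least one sampled page."""
    all_clusters = {cluster_of[p] for p in population}
    hit_clusters = {cluster_of[p] for p in sample}
    return len(hit_clusters) / len(all_clusters)

def freshness(sample, days_since_last_visit):
    """Closer to 1.0 means the sampled pages were visited more recently."""
    return 1.0 / (1.0 + mean(days_since_last_visit[p] for p in sample))
```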
++ Based on these metrics, what we try to do in our work is, let's say, generate the population of the site. We also explore how you generate the population of a site. For example, you can crawl the site automatically and find all the pages, which we know is technically very difficult, or you can look at the server-side logs, which can also be used to generate a population. And we use these metrics to compare different ways of clustering, using machine learning approaches to cluster the pages. You can cluster them based on complexity, based on structural similarity, or based on freshness, for example. And then you can sample from the different clusters to make sure that you are covering a representative sample of the site.
Of course, here we are focusing on websites. In the previous session, there was a very nice discussion about what we consider, and what we sample from, in a mobile application. That should also be considered: for example, different screens, different layouts, different generated pages, et cetera. So there are lots of questions that need to be answered from a research perspective.
+MATTHEW ATKINSON: Indeed, yeah.
+YELIZ YESILADA: I hope I answered your question about the metrics for sampling.
++ MATTHEW ATKINSON: Yeah, that was very helpful. That was very helpful indeed.
From my experience of doing accessibility audits, it is difficult to come up with a good sample. There is both science and art to it. We would often look at the things that a user could do with the site or the application and try to cover as many of those different things as we could within the budget of the sample size that we had. And we would generally go for the more complex pages, so that we were making sure we would cover as much as possible. In some ways, it's easier if you have a smaller number of samples to pick, because you can pick stuff that's obviously different. It gets harder with a bigger site and a bigger volume of pages to be audited, because you want to make sure that each one is counting for something and not just repeating stuff. And machines are very good at spotting patterns. So as I have said before, I would have been interested in having some guidance, even though, as you've discussed in your answer there, it turns out that counting things is one of the hardest problems there is. Just counting how many things we've got is incredibly difficult.
++ So we actually had a question earlier; I am just trying to see if we've got any additional ones now. Somebody asked earlier whether we actually need accessibility guidelines so much anymore if AI is going to be building the websites. And I had a couple of perhaps not fully formed thoughts on that. Even if AI was building the websites, and even if a different AI was measuring them (and for my part, I don't think that's going to be 100% of the case in future; I think it's a tool), we would still need accessibility guidelines in order to make sure that the site was being built to a particular standard, and that the site passed a particular standard in terms of requirements, so that it would be expected to be accessible.
So I think there's still a need for accessibility guidelines. In a way, my answer to that question probably says more about my perspective, which is that we are building these things for people, and that means, to me, that people really are best placed to be involved in making the creative decisions around both the building of it and the creative or subjective decisions in the testing of it.
It remains to be seen how effective machine learning can be as a tool, but there are definitely certain things that seem like exciting avenues for exploration. So that's my thought on that particular question. But I'd like to turn it over to either or both of you to see what you think about it. And apologies for doing this suddenly. Hopefully my rambling has given you time to think about it.
+YELIZ YESILADA: Matthew, I just want to add there that I think we need guidelines, in one form or another. Because what I also see in the application of AI is that we really need expertise. We need people who have a good understanding of the requirements of disabled people, such that they can encode that into algorithms. When we say "AI," these AI algorithms have to be developed, they have to be put into action, they have to generate the models. In order to generate models, we need experts who understand the requirements of disabled users. And the understanding of those requirements is encoded in the guidelines. Whether you call them guidelines or requirement documents, in one form or another we will need them, because we need people to have a good understanding of what is needed, I think. I didn't mention it at the beginning, but I also see this as one of the challenges for AI advancement: we need people who are good at developing algorithms and generating models, et cetera, but we also need people with a good understanding of accessibility requirements. I think these guidelines, or "requirement documents," are an excellent place for communicating these kinds of requirements so they can be automated or modeled in one form or another.
++ MATTHEW ATKINSON: Yeah, and to me, this is a continuation of the well-known principle that if you want to really find out how accessible your site is, get some people who are facing accessibility barriers to test it. Somebody like me can come along and tell you where the areas of potential risk are and what the technological solutions might be, at least in my previous role. And that's all very well and good. And I do have some lived experience of disability; I have a vision impairment. But the best people to tell you are the people who are going to be using your site. So it's always, always the best idea to get input from real people using your products and services as often as you possibly can.
So just coming back to that question: do we need accessibility guidelines, Alain?
++ ALAIN VAGNER: Yes, so I think they are really needed. I just wanted to add something that is probably less interesting for most of you, but it's interesting for me: the legal part of it. For us, for all the public sector websites, it's in the law, so the websites should be compliant with the guidelines. If there are no guidelines, we have an issue. We need some kind of scale. We need to be able to compare. We need to be able to say whether a website is compliant or not, and this cannot be done without any guidelines.
And this is also important for business, because the European directives often have an economic impact, and one of the aspects of the Web Accessibility Directive was also to develop a uniform market for accessibility in Europe. So we need these guidelines to have this uniform market.
+MATTHEW ATKINSON: Excellent. Thank you very much for that perspective. We do have some questions that have come in. One I briefly wanted to come back to is the general question we got about whether AI could be trained to evaluate accessibility. I think we've all said that there are possibilities here, but there are challenges. But one of the things that was mentioned was the Europe-wide monitoring exercise. And Alain, you mentioned that, who knows, maybe some of the data from that could be used to train AI. I am just wondering, Alain and then Yeliz, what your thoughts on that are, and then we can go to some of the questions that we've got in the queue.
++ ALAIN VAGNER: Yeah, I would say I think it should be possible, but the data should probably be of good quality. This is something Yeliz already mentioned, and we didn't think about it when we produced our reports. So for now, maybe we should also discuss with AI specialists who could tell us what they need as input to be able to train their models. But I think there are some opportunities, and there are also some kinds of pretrained models. I don't know if this is really answering your question, but for example, we have lots of problems, as I said, linked to languages, and there are some pretrained language models that could help us a lot with our mixed-language issues in the pages. So I think the models are already there, more or less. Maybe we need to refine them for some of the languages we use here that unfortunately may not be globally available. But yeah, this is one point.
For the rest, I would say that's it for me. Thank you.
+MATTHEW ATKINSON: Okay. Any additional thoughts on that, Yeliz?
++ YELIZ YESILADA: I just want to say, as I said at the beginning, AI is not a silver bullet. It's not going to solve the full problem, in my opinion, in the near future. We need a lot of development in the area. But of course, there are certain areas that we mentioned where it can really help. I think we already mentioned them, so I don't need to repeat. But there are certain things where AI models can be generated to help out with the full process of evaluation, I think.
Matthew, I hope I answered.
++ MATTHEW ATKINSON: Super from my perspective. + So there's one question here that I feel like I can answer just myself, although I will invite you guys to chime in. + And it's a good question, as they all are. Are there any accessibility guidelines or standards that we can use to + evaluate AI or machine learning interfaces such as chat bots or ChatGPT? +
++ From my perspective, the answer is yes. It's WCAG. It's the existing standards. These interfaces are presented via the Web, and so you can apply WCAG to them.
Slightly more specifically, and on a little bit of a tangent, there is a growing resurgence of command line user interfaces, especially for developer tooling. For command line interfaces that operate on a machine natively, you can't apply the whole of WCAG, but there is work at W3C that tells you which bits you can apply. Just as we've talked about in other areas, WCAG is being applied in a range of different areas. But whilst these chat bot interfaces might look very conversational, and almost like a command line interface in some ways, they very much are, to my knowledge at least, being presented as Web apps, and therefore I would say that WCAG is the best set of guidelines for that.
If either of you have any additions to that or differences of opinion on it, I'll just give you a couple of seconds to say so, and then I will try and pick one of these other questions, because we've got a few.
+YELIZ YESILADA: I agree with you, Matthew, so I have nothing to add.
+ALAIN VAGNER: Same here.
++ MATTHEW ATKINSON: Okay. So I see a couple of questions along the lines of: could we say that AI will never be able to complete the last kilometer on its own whilst doing accessibility testing or remediation? And I think at the moment we are all saying pretty much that. We've talked about that a little bit, but it's a nice way of phrasing it.
There's a question here that says: we know that AI is helping with meeting, and perhaps even evaluating, compliance. Do we know of any examples where AI has broken things that were working well? Would either of you like to talk about that?
+YELIZ YESILADA: I can add there. Sorry, I think I jumped in. I just wanted to say that, of course, in AI algorithms, accuracy is very important. If the accuracy of the models is not high, that means they will not be able to handle certain things and they will make wrong decisions. So we can see that they can actually break things. We see in caption generation or alternative text generation, for example, that at certain times the models are not able to generate proper automated captions or automated alternative text. That's just what I wanted to say.
+ALAIN VAGNER: I have maybe another example in the same vein, so it's the same idea. I have recently been doing lots of tests of tools for PDF remediation. We have lots of problems on the public sector websites: there are lots of PDF documents available on the websites, and they are totally not accessible. We have done some statistics, and on the 20 biggest websites in Luxembourg, approximately 60% are not accessible, which is really big. And on this, some of the organizations asked us: we have tons of PDFs, how will we be able to remediate them? There are some AI tools, so they were testing them, and we also tested them. We have seen that AI is mainly involved in the auto-tagging. Tags are, in fact, metadata in PDF documents that are used to express the structure of the document for assistive technologies, in particular for blind people, for example. And this auto-tagging using AI is a bit better than auto-tagging based on heuristics, but it's still not there. I have seen that some companies are announcing that their AI is more performant than manual tagging, but from my experience, it's not the case. I would be interested in seeing independent tests on this. Independent tests would be really helpful to see to what degree these tools are able to automatically tag documents.
+Among the issues we have seen, there were some things like you mentioned before, the complex problems: the detection of headings in tables, et cetera; the detection of artifacts, what is decoration and what is not; wrong reading order for complex layouts, so when you have complex layouts on pages, the reading order is often not really good; and the detection of some patterns. In documents you have some special patterns, for example a table of contents. The table of contents is a special kind of pattern, and it should be detected by the AI. So these were the points where one or two AIs I have tested were not able to detect everything. But I think there is some room for improvement there, of course.
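None of the tagging problems Alain lists can be caught by a trivial check, but a first-pass filter for completely untagged PDFs is straightforward. A small sketch using the pikepdf library (an assumption about tooling, not something used in the monitoring described above): presence of a structure tree says nothing about its quality, so this only separates "untagged" from "needs manual review".

```python
# First-pass filter only: a PDF without a structure tree cannot expose headings,
# tables, or reading order to assistive technologies at all.
import sys
import pikepdf

def is_tagged(path):
    with pikepdf.open(path) as pdf:
        return "/StructTreeRoot" in pdf.Root

if __name__ == "__main__":
    for path in sys.argv[1:]:
        print(f"{path}: {'tagged (review manually)' if is_tagged(path) else 'untagged'}")
```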
++ MATTHEW ATKINSON: Okay. We've got three minutes left. I have seen one other question, which I wish I had seen earlier, but I will do my best to just set out some general ideas about it. This is a complicated subject, so wish me luck.
So somebody's asking: could knowledge of overlay tools be usefully used for technical monitoring?
++ And I think it's important to introduce people to the concept of an overlay. At its most basic level: imagine your organization has a site, and an overlay is effectively third-party code that you import and run on the client side, in the browser. Its goal is to detect accessibility problems and to fix them. And as you can see from our discussion, our view is that there is potential for some things to be done with machine learning, and there's still a long way to go with a lot of other things. So there are differences of opinion in the industry as to the efficacy of these tools. But as you have seen from our discussion, there's openness to exploring things.
But if overlays were run on many sites and had the opportunity to see many sites, I think the question is: can that add up to some useful monitoring experience for us?
I am not sure that there would be enough verified data to start forming a model from that. But very quickly, I am just wondering if either of you have a few thoughts on that. I think it's going to have to go to just one of you, because we've only got a minute left, so I apologize for that. But if any of you have any extra thoughts to add to mine, please do.
+ALAIN VAGNER: It's a good question. Yeah, it's difficult to say. From our experience, these tools can be interesting for some fixes, but we should not rely only on them. They could be, for example, a first step, "we have done something on our website, we have included a bit", but it is not the end of the road. There is still some work that should be done on the side of the authors, on the technical side of the website. As we have said, you cannot automatically detect all the accessibility issues, and if you cannot detect them, then you cannot fix them. So there is always still room for manual testing and manual evaluation and, yeah, improvements to the accessibility of websites.
+YELIZ YESILADA: I agree with Alain. Matthew, I just wanted to add, I think we already mentioned it: as with the AI algorithms, we have to approach these overlays carefully, especially regarding their accuracy, reliability, and transparency. So rather than aiming to replace the evaluator or the author and fix the problem, we can use them in a supportive role, making sure that they are checked afterwards, whether they are doing the right thing or not, based on the reliability, accuracy, and transparency that I mentioned.
+MATTHEW ATKINSON: Cool. And we have to wrap it up there, and I apologize for us going over by a minute. I will just say again thank you to everyone, and especially Alain and Yeliz. And if you want to discuss issues like the last question in particular, I think the Accessibility at the Edge Community Group would be a good place to do it, because we are trying to get consensus in the industry on issues just like this. And also, please check out the Ethical Machine Learning Principles. Thank you very much. Back over to Carlos.
+CARLOS DUARTE: Thank you so much, Matthew, Yeliz, and Alain, for another wonderful panel. I think we have been really lucky with the three panels today. It was an excellent experience, I guess, with three different perspectives on three different topics on how to increase the digital accessibility of resources.
+So once again, just to conclude, many thanks to all our participants today: Jade, Sarah, Audrey; Detlev, Andre, Paul; and Matthew, Yeliz, and Alain. It was a privilege to attend this session, and many thanks also to the interpreters and the captioner for their excellent work. As I said at the beginning, this is the final symposium of the WAI CooP Project, but hopefully this trend will not end here, and W3C can pick up the organization of these symposia and continue them in the future. So thank you so much to all panelists, thank you so much to all attendees, and we are looking forward to future developments in Web accessibility. Bye bye.
+{% include excol.html type="end" %} + + + + +## Organizing Committee +{:#organizing} + +### Symposium Chairs +{:#chairs} + +- Carlos Duarte (LASIGE, Faculty of Sciences of the University of Lisbon) +- Letícia Seixas Pereira (LASIGE, Faculty of Sciences of the University of Lisbon) diff --git a/pages/about/projects/wai-coop/symposium3_transcript.html b/pages/about/projects/wai-coop/symposium3_transcript.html new file mode 100644 index 00000000000..3407e7d1223 --- /dev/null +++ b/pages/about/projects/wai-coop/symposium3_transcript.html @@ -0,0 +1,1839 @@ + + + + +CARLOS DUARTE: Hello, everyone. We are delighted that you are able to join us in this Evaluating + Accessibility: Meeting Key Challenges Symposium. We are looking forward to the three panel sessions today. My name + is Carlos Duarte, and on behalf of the whole organizing team, I would like to offer you our warmest welcome. + Let me take just a moment to say a big thank you to the wonderful team that made this possible, my colleague here at + the University of Lisbon, Letícia Seixas Pereira, and my colleagues from W3C. Also a big thank you to the + interpreters and captioner that will support us today.
+And before we get going, just a couple of important reminders. By taking part in this symposium, you agree to follow the W3C Code of Ethics and Professional Conduct and to help promote a safe environment for everyone in this meeting.
Also, this session is being video recorded and transcribed. The recording and transcription will be posted on the symposium website later. If you object to being transcribed, we ask you to refrain from commenting.
I would also like to take this opportunity to thank the European Commission, which co-funds the WAI CooP project through the Horizon 2020 program.
+ +Now let me describe some of the logistics of this meeting. Audio and video are off by default. Please turn them on + only if requested and turn them off again when no longer needed. And during the presentations and the discussions, + you can enter your questions using the Q&A feature of Zoom. Panelists, moderators, and the organizing team will + monitor the Q&A, and they might answer your questions, either live if time allows it, or directly in the Q&A system. + You can use the Chat feature to report any technical issues you are experiencing. We will monitor the Chat and try + to assist you if needed. + And if during the seminar your connection drops, please try to reconnect. If it's the whole meeting that is + disrupted, you won't be able to reconnect, obviously, and we will try to resume the meeting for a period of up to 15 + minutes. If we are unsuccessful, we will contact you by email with further instructions.
+ +Okay. As I mentioned before, this symposium is one of the results of the WAI CooP project. WAI CooP started in + January 2021, and we will run until the end of this year, so another month and a half. This is the third and final + symposium organized by the project. + The main goals of WAI CooP include supporting the implementation of international standards for digital + accessibility. And the project aims to achieve this goal from various perspectives, by providing different overviews + of accessibility related resources, including tools or training resources; by developing actions like this one to + promote collaboration between research and development players; and by creating opportunities for the stakeholders + in this domain to exchange their best practices through, for example, a series of open meetings that also concluded + earlier this week.
+And as I just mentioned, this is the final symposium organized by the WAI CooP Project. This symposium aims to discuss three of the main digital accessibility topics that have been identified through a series of interviews with stakeholders involved in worldwide accessibility and monitoring efforts. Each topic will be discussed in its own session. In each session, we will have a moderator who is involved with W3C work and two panelists, one working in research or academia and the other a practitioner in the field. With this setting, we expect to ensure a broad-spectrum discussion that considers perspectives from different viewpoints that are not always synchronized.
+ +And now I will finish by introducing you to today's agenda. And we will start with a session on Digital + Accessibility Training and Education. After this session, we will have our first coffee break. The second session + will focus on Mobile Accessibility, and once again, it will be followed by another ten minute coffee break. And the + final session will discuss the topic of AI for Accessibility Evaluation, and we expect to close by 5:40 p.m.
+And so now let's move to the first session. This is a session, as I mentioned, on Digital Accessibility Training and Education. It's going to be moderated by Jade from The Open University in the UK. And our two panelists will be Sarah from the University of Southampton, also in the UK, and Audrey from Access42 in France. So Jade, you can begin our first session. Thank you.
+ + +JADE MATOS CAREW: Thanks, Carlos. Hi, everyone. My name's Jade. I am Head of Digital Accessibility + and Usability at The Open University. It's a real privilege to be here today moderating this session for you. + So I am joined by two wonderful experts in the field of digital accessibility training and education. So we've got + Sarah Lewthwaite, who is a Senior Research Fellow based at the University of Southampton. We've got Audrey Maniez, + who is Director at Access42, and I will let them introduce themselves properly when we get going.
+When you registered for today, you sent in some questions, and we had a look at them. Some of them were quite wide-ranging and varied. And because we have the experts in the room with us today, we wanted to make sure that the focus was really exclusively on digital accessibility training and education. So apologies if we don't answer your specific questions. If you think of any questions along the way, please ask them in the Q&A, and you can also comment in the chat as well. We are really open to welcoming questions and comments. It's important to know what's happening out in the wider world, so we can react to what you are also doing in this field.
+I think that's all I need to say. I am going to hand over, maybe I'll hand over to Sarah. Do you want to introduce + yourself?
++ SARAH LEWTHWAITE: Hello, everybody. Welcome to our session. Thank you for joining us. My name is Sarah Lewthwaite. I am here at the University of Southampton, where I lead a project called Teaching Accessibility in the Digital Skillset, and we've been researching the teaching of accessibility: the content, the approaches, the strategies and tactics that educators use in the workplace and also in academia. I am based at the Center for Research and Inclusion. And I was also previously a member of the Web Accessibility Task Force for Curricula Development, part of the Education and Outreach Working Group of the Web Accessibility Initiative. I will pass over to Audrey.
++ AUDREY MANIEZ: Hey, I'm Audrey. I am a digital accessibility specialist at Access42. We are a company specialized in digital accessibility, based in France, so English is not my native language, sorry. I have been doing accessibility for more than 12 years now. I do audits, I deliver training, et cetera. And I also manage the training center for Access42, where we offer professional training on accessibility.
+ +JADE MATOS CAREW: Thank you. So we've got kind of a broad agenda within education and training, so + we are going to be looking at things like resources, training needs in the workplace, and how we can embed + accessibility in the curricula. But we thought we might kick off with having a look at some resources, how to get + started with this. It's quite a complex area with lots of different themes throughout it. + So who wants to take that first? Audrey or Sarah, how do we get started? What resources are we looking for when we + are getting started with digital accessibility? +
+SARAH LEWTHWAITE: Well, I suppose the first thing to say would be that the W3C has some really interesting resources available, and Jade, you might want to talk to some of that.
Also, we've obviously got some great repositories of videos and resources. I know the University of Colorado Boulder has a huge collection that they've been building, and Teach Access have also been collecting resources by teachers about how they've been using Teach Access funds to develop accessibility teaching in different classrooms.
Audrey, would you like to comment?
+AUDREY MANIEZ: Just to say that the resources you cited are really great, and it's important to identify the authors of resources; that's a really important point. Resources that are created by the W3C are a really good starting point, because we can find a lot of articles on the Web, and some of them, well, a lot of them, give false information, wrong information, or outdated information. So you have to be really careful when you find something on the Web about accessibility, and check who wrote it and when it was written. That's really, really important. Free resources are great, but be careful.
+ +SARAH LEWTHWAITE: I think with that, when we've been doing our research with expert teachers, + particularly in industry but also in academia, there's that question of where do you send your learners when they + want to continue their learning journey? So if you are new to teaching accessibility or if you have established + skills but you are aware that you continue to develop them, do reflect on how you continue to develop your skills, + where you go for good knowledge, because that information about how we learn accessibility is really important to + cascade to your colleagues, to your teams. Because obviously, we are aware this is not a static field. This is an + area where we have to keep developing our own learning, even as we are teaching, even as we are researching.
+JADE MATOS CAREW: It's a really good point. At the Open University, my team looks after a lot of + the training and advocacy for digital accessibility. And when we signpost to external resources, we go through a + vetting process to make sure that it's relevant and meaningful for our staff. And every resource that we post, we + make sure that it's dated. And we routinely go through and check those dates because links break and things happen, + things get outdated. So yeah, it's a real exercise in looking after that as a complete resource.
+I am slowly putting links in the Chat, by the way, for everybody. And we've just had a question in: What criteria + do you have in mind when deciding which resources to use and where to look at? How can an expert or even non expert + decide?
+AUDREY MANIEZ: That's difficult. That's difficult because you have to know a little about the industry to know which authors, companies, or organizations are relevant and trustworthy. You have to know a little bit about them. The community can help to identify this. We have a brilliant mailing list where you can post questions, ask questions, and the accessibility community will answer you. So I don't really have criteria, but it's important to know who is who in the accessibility field, I think.
+JADE MATOS CAREW: I think the WAI resources as well: because of the way in which they are designed and made, you know, by a panel of people who come together and work really hard to edit small words and details in all of those resources, you know that you can trust them. They've been through a really rigorous editing process. So personally, whenever I have to direct someone to a resource, that's always top of my list. And there's a whole range of resources on that for lots of different needs, lots of short resources for simple advocacy as well.
Sarah, do you have any comments on that last question?
+SARAH LEWTHWAITE: No, except to say, obviously, this is an ongoing issue in a lot of fields, you + know, the quality of online resources is a huge issue for anyone teaching in higher education. It's a question of + how you assess and critique and critically engage with online resources. But as Audrey and Jade have mentioned, + there are awesome, very solid, very well developed resources, good places to start from in terms of the field. + But I do realize this is a particular challenge in accessibility because it is fast moving, and also because so much + education takes place outside of formal learning environments. So you know, you will be learning on the job, + learning by doing. There will be informal training, training organized by different organizations, conferences. + There are a lot of places to learn. And traditionally, those have been where the majority of learning takes place. + So it is a recognized challenge, but it is worth investing time and thought into. +
+JADE MATOS CAREW: Well, Sarah, one of your recent research papers, which I'll post the link to in the Chat, was looking at workplace approaches to digital accessibility education. And you raised the topic of having a foundational knowledge of accessibility, or a baseline knowledge. I was wondering if you could talk to us about what that includes and who decides what it includes.
+SARAH LEWTHWAITE: That's a big question. Excuse me. So yes, as I say, with my project we've been trying to get close to practice, but to look across a variety of different locations, from workplace to higher education, to understand what characterizes accessibility as an educational field. With that, I know when we looked at some of the questions submitted to this session, people wanted those kinds of tricks and tools and tips, and that's why we've kind of started in this resource place. But some of the questions that you will have to ask yourself as an educator are quite fundamental, in the sense that different contexts will make different demands, and different learners will require different resources. And there's a certain core of central knowledge that you need to establish.
And we wrote about this foundational approach because we realized that, particularly in the workplace, a lot of effort is put into bringing people onto the same page. So we recognize that accessibility is a shared endeavor. It's not located within one role. It shouldn't be about one accessibility expert serving an entire organization. A lot of people need to know what accessibility is so they can collaborate and recognize it as a shared responsibility, a professional responsibility.
++ So there are lots of dimensions to that, and when you are coming to this as a trainer or somebody trying to build capacity in your organization, there are a lot of facets that come into play. For example, understanding what prior learning you have, where your learners are coming from, what perspectives they bring. Knowing where there might be misconceptions can also be vitally important to helping people on that journey. And you'll need to do work on defining what the core knowledge is for your organization, what your colleagues really need to know, what the essential points are. Within that, there can be quite complex different worlds of knowledge that you have to bring together.
So for example, we are here talking about Web accessibility standards, but there's also a piece about disability awareness, which can be more conceptual: how people understand "normal" (I am doing inverted commas), what their average user is, and trying to break that down and unlearn some of the assumptions people bring into organizations, sometimes from their educational pathway to date. So there's this kind of conceptual piece about disability, and there's the technical piece. But in between, there's also a lot of knowledge that people need to gain around how to do accessibility in the field, which can be to do with decision making, process, and often collaboration between and across a workflow.
So that, then, introduces questions about whether you are bringing different roles together to learn about accessibility and those fundamentals, and how and when you should specialize. I have talked quite a lot there, so
+I'll hand over to Audrey because I'd love to know from her side what the kind of view is on that.
+AUDREY MANIEZ: Okay. Thank you, Sarah. Great talk.
So yeah, about the core knowledge for everybody in an organization to share on accessibility: as you said, awareness about disability, a deconstruction of assumptions about what disabled people can do. I think there's also knowledge about the political aspects of accessibility, which is really about why we are doing this.
Then, on the more technical side, maybe, it's also important to know the users' needs. That's really a key point in accessibility for all jobs, whether for a designer or a developer or whatever: understand why we are doing things, to resolve what kind of problem, what kind of issue. That's really the key point for everybody. Understand how users navigate on the Web, how they access or don't access information. I think that's the base of knowledge everybody should share.
+JADE MATOS CAREW: Does that also include compliance and legislation? This is one of the questions that we had in from a participant. So what role does that play in foundational training?
+AUDREY MANIEZ: Yeah, legislation can be complex. People need to know a bit about it, that it's required in some countries, but yes, it can be really complex. For example, in France, it is becoming really complex to follow all the news about the legislation. So yeah, it's important. It's important.
+ +SARAH LEWTHWAITE: I think in teaching, sometimes standards have quite a conflicted role. So some + of our experts talked about how + sometimes they won't use standards. They'll talk more to why this work is important and focus on the user and use + that as the kind of motivating principle for learners. Others talked about compliance in terms of finding ways to + introduce learners to standards without kind of dropping them in at the deep end, to use a metaphor, which means, + you know, using sometimes resources which translate standards into more accessible format for people who are new to + the field. Or maybe starting in a small place, taking parts of WCAG and exploring what they mean to give people that + entry route where they feel they can try things out, that they are applying their learning, and that they can then + move on to look at the broader standards picture themselves, feeling they've already entered and tried and + practiced.
++ But there is also an important conceptual dynamic, which is I think standards are so important to Web accessibility, + but how we present them is also important. So often our experts talk about presenting them as a floor, not a + ceiling, + in the sense that here's what we are going to try and do, and then you want to go and try and get beyond that. Not + that this is what you are aiming for and then you are done. So always encourage developers, designers, content + authors + to use these structures of Web standards, but also to scrutinize what they are doing. So you are not just learning + and + designing to the standard. You are always critiquing your own practice, examining what you are doing, why you are + doing it, how you are doing it, to keep that balance between the kind of the structure versus the creative piece + because creativity is so important in our field. And it's recognizing that Web standards can be part and enable + that; + that they don't close down creativity. Because we know creative learning is so important in terms of getting people + motivated and enjoying their work. +
+JADE MATOS CAREW: In my experience, different types of learners react to standards and guidelines + in different ways. So for some people, especially if they don't have, maybe, a technical role, they can switch off + if you present them with an overwhelming set of technical standards. So in my context, we have a lot of focus on + practical demonstrations and examples rather than going straight to the guidelines. + Do you think that following guidelines and standards helps people keep up with a changing landscape of digital + accessibility? So this is another question which has come in. How can we keep up with evolving ways of accessibility + and how it changes quite quickly sometimes? +
+SARAH LEWTHWAITE: I am going to hand to Audrey. How do you do this in practice?
+ +JADE MATOS CAREW: Okay
+AUDREY MANIEZ: How can we evolve even if we follow the standards, that's the question. As you say, the standards are not obstacles. They are just things we have to do to allow people to access Web content. And that's where it's important to know the users' needs. I come back to that because if you know which goal we are trying to reach, we can imagine lots of solutions. That's why we have to know what our users need and how they navigate, because that allows people to create new solutions while still meeting the success criteria. I mean, it's really important to begin the reflection, the thinking, with the user. You begin with the user first, and then you can create a solution, I think.
+SARAH LEWTHWAITE: I think that's so important because, you know, the accessibility standards are abstracted knowledge about what disabled people do online, how they use the Web. I think it's great that we've got so many resources from the W3C that show where these are coming from and why they exist, in terms of helping close that gap with the standards. But yes, if you want to stay ahead of the game, it's always working with the people whose knowledge is the foundation for accessibility Web standards. So it's talking to your users, all your users, recognizing the breadth of your users. And it's also hiring for your teams and making sure that your teams reflect the world as it is, which means including disability, including disabled people, and recognizing, excuse me, recognizing what we have to bring ourselves to that conversation also.
+JADE MATOS CAREW: This links to another question that we've had in. Thank you for all of these questions. Please do keep them coming. And it's about AI. The question says this might be a bit more relevant for later, but this is really forward-thinking stuff. How are we dealing with all of these future evolutions, things like AI coming into different areas of accessibility?
+ And there's even a question there about whether the accessibility requirements will become redundant with AI doing most of the work in building websites. Maybe there won't be a need for training in the future. What do you think? Audrey, what are you seeing in the field?
+AUDREY MANIEZ: Oh, that's a really complex question. I don't think AI will solve every problem of accessibility. Most accessibility issues are based on understanding context. Even today, we have automated testing that tests a really, really little piece of our requirements in accessibility. Well, I am not sure AI will help more to detect or fix issues. It can help in other fields of accessibility, but to fix issues, that I am not sure. Well, really, I am not a specialist of AI, really.
+SARAH LEWTHWAITE: Yeah, I am sure this will come up for discussion later, and I think there will be some really interesting answers in that session. But the concern I have, coming from a background in disability research and disability studies, critical disability studies, is that data, be it social statistics or statistical views of populations, tends to be highly normative. Where data is normative and these ideas of average arise, people who are positioned on the edge of that are often missing and often further marginalized. So I have major concerns over AI in terms of what it deems "normal," be that websites (do we think the majority of websites are accessible?) or what these tools are able to do in view of, as Audrey says, the changing and contextual nature of accessibility.
+I think there are some really interesting discussions happening, and there are some good people looking at how you work with data so it is more inclusive. So Jutta Treviranus talks about the bell curve and the need to take a lawnmower to the bell curve, so that you are always including and weighting data to take care of everybody, basically. But that may be a slightly different subject to this automation-of-testing dynamic. But I just think so often people are looking for ways to cut real people out of the system and the process, and I think it's really important to recognize the value of authentic experience of our products from our users.
+ +JADE MATOS CAREW: Are you seeing links between accessibility and AI or XR, AR, VR, and is that + being brought into + training and education for accessibility? New, evolving areas, are they being brought into the curricula, do you + think?
+ +SARAH LEWTHWAITE: I think I don't want to sound downbeat, but I think at the moment, there are + some tussles + happening in the computer science curriculum, which sometimes mean AI is coming in and pushing out other areas. So + some of our educators that we've interviewed talked about the need to kind of harness new fields and make sure our + accessibility is part of that from the get-go. So yeah, we are seeing AI and accessibility courses starting. We are + seeing people putting AI at the heart of XR and VR and also robotics. And there's some really exciting things. + Whether those are coming from the mainstream of those disciplines or whether they are kind of accessibility people + kind of busting in to make things happen I think is less clear. So I can't speak to that overarching picture. But + it's really important to keep accessibility in these innovative spaces because standards and so on tend to come a + step behind just by virtue of how they are made and created.
+ +JADE MATOS CAREW: How can we keep up with that? There's a question in the Chat: How can we cope + with the fact that advice from yesterday may no longer be relevant today because of evolution in technology?
+ +SARAH LEWTHWAITE: I would say, as we said before, it's that perennial problem. It's an ongoing + issue. And where you can, it's maintaining that user research, that accessibility research with real people that's + going to help you bridge that gap. So keep that value in your developmental practice, in your learning practice, and + then look at how you cascade that knowledge through your organizations. Because there is an organizational change + piece here, I think, that we've not talked about yet. And it's a tension for me. My research is very much about what + teachers do, what educators do in that kind of local space of the classroom. But there are also the sociocultural + dynamics that push and pull on what's possible in education, in the industry. And there is that need to think about + the organizational piece. And I know conversations about accessibility maturity and some of these overarching issues + are really important, too.
+ +JADE MATOS CAREW: Well, let's think about that change management piece. It's so relevant to + accessibility and how we handle it in the workplace. Audrey, I think you have a lot of experience in terms of + training in workplace situations. So how in your experience, how is accessibility incorporated into a professional + development context?
+AUDREY MANIEZ: So yeah, we do a lot of training. We train a lot of people that are working in organizations; we train a lot of developers and auditors. And it's clear, as you say when you talk about management and so on, that the organizations who train their employees already have a political strategy for accessibility. That is the first thing that is needed in an organization, private or public. If the director, if the company, has no accessibility policy, there's no training for the employees. So it's really a global, political subject in companies and in public organizations, so that people can access training. So yes, we need a clear strategy in organizations so people can be trained in accessibility. It's not an individual initiative that leads to training. That's really important. Oh, sorry. So in the workplace, that's what I can say about it.
+ +JADE MATOS CAREW: Well, if we can pick that training apart a little bit. So something that + interests me in particular is moving away from providing just guidance and just one off or ad hoc trainings to + people that perhaps people go through in a very passive way, they don't really engage with the materials. So in your + experience, for both of you, and Sarah as well, you are interested in the pedagogy behind how we can actually make + people do this in reality. So what does the training look like? How can we make it effective and meaningful?
+AUDREY MANIEZ: Accessibility jobs are really experience-based work. Even if you have followed a training for two or five days, like we have, for example, at Access42 for designers or developers, after that it's really important to have time to train on real projects. For example, at Access42, we do this for our own needs: we train a lot of the auditors that we have. And after the initial training, it can take four to six months for people to be really independent in their work. Really. So, as you say, you have the knowledge, and then you have to practice to be really effective, to be good at what you do. And you will be better if you are incorporated into a community.
+ +JADE MATOS CAREW: Mm hmm.
+ +AUDREY MANIEZ: That's really important, to have others to share with, to ask questions, to share + practices. That's really in an organization, it's really important, yeah, community.
+ +JADE MATOS CAREW: What do those communities look like? So for example, at the Open University, we + have social media, internal social media channels. We provide drop ins and lots of opportunities for staff to + network. Different things like that. What kinds of things do you experience?
+AUDREY MANIEZ: In our company, for example, it's truly a place to share every day. We do audits every day, so we share every day about what we find in accessibility. We have a chat where we speak to each other, to ask questions, to find help, to fix some issues, et cetera. And what is great is that the chat is like a knowledge base. If we recruit a new person, they can read everything we discussed for a year, two years, and that's our knowledge base. That's truly our documentation, our own documentation. That's really, really interesting. And it's the same with the mailing lists, if I can talk about the WebAIM list; those are really two rich resources that you can search in. That's really, really great documentation too. So yeah, community sharing, that's what we do. And once a month, we all have a video meeting together to share problems, to harmonize our way of working, our way of telling things, of presenting things to developers, et cetera, et cetera. That's what we do.
+ +SARAH LEWTHWAITE: And if I can add, when we've spoken to, basically, government experts about how + they build those large scale communities, so if you do have these Q&A spaces, questioning spaces for people to trade + information and, you know, knowledge that's really specific to your organization, we've seen strategies by the + managers of those lists where the experts will purposefully hold back slightly when a question is raised so that + there's an opportunity for people in the community to start to express their expertise and practice and bring to the + table, maybe for the first time, their knowledge.
++ And then you've still got that kind of safety net of the experts + on the list ready to step in if there's any accidents or if anything is slightly incorrect. So if you are building + these sort of online spaces where you are sharing information, think about ways to help bring the new people on + board and let them step into their expertise and express what they know to help build that expert capacity more + broadly. So it's not always falling down to the champions of accessibility to be the one person who knows + everything. Because we know that model is precarious. If that person leaves, you lose the expertise. So, so much of + this is about broadening the knowledge base. And I know many people talk about the importance of everybody knowing a + little bit about accessibility. It's from this point then we can sort of build and build up the expertise. +
+ +JADE MATOS CAREW: We have a really good system at the OU where if we get asked a question, the + first stage is to + direct them to our internal social media, to ask there. And also, Audrey, as you were saying, search through what's + happened before and whether it's been asked in the past. That's a really, really useful tool to have. But also it + encourages other people who aren't accessibility champions to jump in and answer and share their expertise. And then + if we still can't have the question answered in that space, that's when members of our team will come in and try and + give an answer from an expert perspective. Thank you. Sarah, I want to ask you about what skills so we've spoken a + lot about informal ways of sharing knowledge, but what about the formal ways? So what kind of skills do people need + to teach accessibility effectively?
+SARAH LEWTHWAITE: The skills to teach accessibility?
+JADE MATOS CAREW: Mm hmm.
+ SARAH LEWTHWAITE: Well, so I think one of the reasons I started my project was because I was aware that sometimes, particularly in academia, where maybe there's more teaching knowledge, more teaching experience, there isn't necessarily the accessibility expertise that you see in industry. And likewise, in industry, I think a lot of teaching knowledge is quite hard won, by doing the teaching and gaining the knowledge that way.
+ So I was interested in how the pedagogic knowledge and the content knowledge, the knowledge about accessibility, are fused together. So what the teaching of accessibility requires specifically, and how to build that knowledge up through research and cross-case research. So I would say, if you are on this call, there's a lot of open access research about the teaching of accessibility, which I think often isn't where we first go when we are designing teaching, right? There are shared curricula. There are research papers which you can draw on. We wanted to do cross-case research so we could look at a variety of contexts and what's important in those contexts. And of course, it does vary depending on who your learners are and what you are trying to do.
+ So some of the questions that I would put to people on the call are about establishing what your learners need to know about accessibility: what is essential, what are your learning objectives? Try to set those and be clear with yourself so that you can then put them into action. And it's difficult because I also recognize there's a lot of expertise in this room that we can't see. So you know, it's recognizing that.
++ Alongside these accessibility communities we've talked about, I think there's a real need for teaching accessibility + communities, places for teachers and trainers to share what they do and start reflecting on what they do, naming it. + So don't be afraid of pedagogic language and start to think about, you know, reflexive practitioner, thinking about + learning by doing rather than learning through trial and error. You know, how do you when you are getting your teams + to do projects, as Audrey described, when people are practicing in the field or in simulated situations, if you are + teaching a graduate program and you are running project based learning with your learners, there are a range of + things that you can put in place around that team to help them, to support them with resources, to check in around + what skills that team might need. +
++ But I suppose I am talking around a range of issues. But I think I want to come back to that key point around + disability awareness by understanding the users, understanding disability, thinking again about ourselves, really. + That awareness piece being so fundamental. And then with that, this process piece about the doing of accessibility, + how are you going to give your learners opportunities to put knowledge into practice? And then also the technical + piece, that there will be a certain range of techniques coding, et cetera that is also part of that kind of learning + by doing. So it's bringing together those three, but recognizing that they are quite different worlds of knowledge + that you are having to bring into like synthesize together. So you will have learners who are much happier coding, + and you will have other learners who are much happier getting into the usability piece, trying to understand what + people want and need and thinking about complex issues. Overall, accessibility does deal with uncertain knowledge. + You know, we have to work hard to know what to do in any given situation. There aren't always straight answers. And + Web standards take us so far, but they can't answer all the questions we have about what our users need. +
++ Now, for some learners, that's deeply uncomfortable. They want to know what to do in any given situation. So I think + and it's a real expert competency, dealing with uncertainty is one of those markers of expert knowledge in a vast + majority of fields. But for us in accessibility, it's kind of like dead center. So I think often our experts, you + know, and do read the papers that have been shared in the Chat, and love to hear your thoughts on that as well + because obviously, this is a huge field. I am not saying we've answered anywhere near all the questions. We are just + getting started looking at this piece. But recognizing that that uncertain knowledge, you know, working between + compliance versus the realities of complex everyday experience, is a challenging space. And it has a range of expert + competencies that you need to grow. And for some, it will be uncomfortable. So part of it is often bringing that to + examples that are as clear as possible. +
++ So often when we've spoken to people in organizations like Audrey's, if you are going into an organization, you want + to show them their own websites and how they might work or how they might not work. When you are talking about + disabled people, you might want to be naming members of your team and saying, you know, is this going to work for + Jeff? Is this going to work for me? You know, like always trying to bring it back to something concrete, something + real, so it's no longer abstract and somewhere else. Because the reality is much closer. It's in our everybody's + world. +
+ ++ JADE MATOS CAREW: We've had success with that at the OU, when we developed our curricula for + developer training. + So using the WAI curricula modules. And using them as the foundation to add kind of really relevant contextual case + studies, examples, demos. So we've had success with that and making it really relevant to our audience. + And another thing we had success with was accountability partnering, so pairing up people from around the OU from + different staff groups and having a really practical example of using guidance and training to making real fixes in + their own documents or designs. So that's a really useful thing that we've come across. + Where was I going next? There's been a question in the chat, it's a massive one: How can we integrate accessibility + into the university curriculum? So taking into account the various roles within the accessibility field and their + associated processes. So does anybody want to take that? Sarah, I think that's probably another one for you. +
+ SARAH LEWTHWAITE: All I can say is it's going to take a big effort. Because, I mean, I've drawn a distinction between academia and the workplace, but I recognize that the university is a workplace, and it's quite a complicated workplace. So I think it has to be an effort at a number of different levels that runs across a range of different groups. I mean, really, I should throw it back to you, Jade, because I know the Open University is world leading in so much of this. But I think there's a lot of work that's been done around accessibility maturity for higher education. There are really great conferences and networks, so I am thinking of HighEdWeb and their Accessibility Summit, which I think is annual. And I think you've posted the mailing list on assistive technologies, which serves learning developer communities particularly. And there are, obviously, disability support dynamics as well.
++ I think the challenge for higher education at the moment is that accessibility is largely thought of as being for + students. And it doesn't recognize our disabled staff at the level it should. And it doesn't recognize, in that + respect, the kind of platforms that staff have to use, that researchers have to use, and that it's about more than + just serving our disabled students. It's about the entire university estate. So for me, it's an accessibility + maturity question, and I know there's really great people working in this space. I know AbilityNet have done a lot + of really good stuff about accessibility maturity and higher education. So if you are looking at that piece, that's + where I would direct you to go. But I think it's always a work in progress. But I also think the in Europe in + particular, you know, the new regs on mobile accessibility and the Web mean that our universities are being audited + now for the first time on their public facing accessibility. And that's a really teachable moment for the sector in + terms of universities trying to turn their ships around. I think we deal with a lot of legacy systems in particular, + which are troublesome. But in my experience, in the UK, certainly, it's becoming more positive in that beyond just + serving our students and recognizing our duties and commitments to them, there's a growing understanding of the + responsibility that we have to serving wider publics. And I think there's more mobilizing of political dimensions + amongst staff to fully recognize the breadth and diversity of our staff groups. +
++ As I say that, I know disability can be at the bottom of the list within even equality, diversity, and inclusion + agendas. But I do want to be hopeful about us trying to make changes where we can and using these opportunities to + put disability and accessibility front and center. + Over to you, Jade. Now tell us what you are doing at the OU. +
+JADE MATOS CAREW: I was going to throw it to Audrey, actually, and ask about barriers to embedding accessibility into your workplace curricula and how you deal with staff training. So barriers to embedding accessibility into your training and your curriculum.
+AUDREY MANIEZ: In training, you mean in university or in general?
+JADE MATOS CAREW: In general workplace. So in your practical experience.
+ AUDREY MANIEZ: Okay. The first barrier is always the same: it's the political barrier. Even when the management wants to train people, so people will be trained, we can be faced with the problem of the accessibility of the material itself. The tools that we are using to teach, which is a problem when you have disabled students to train, and the content we deliver to people: those are the main barriers to training on accessibility. That's mainly it.
+ And I liked when Sarah said that in training, students want an answer to each problem. That's a barrier in training too, because they want a clear answer to each problem they will be faced with in the real world, and we can't give them that. But on barriers to teaching, that's all I can say: the tools are really a big problem, because the learning tools are really not accessible at all. They don't allow us to give accessible content to our students, and that's a big problem.
+JADE MATOS CAREW: So in what ways are the tools a problem? You mentioned the technical requirements of tools. What kind of barriers do you see there?
+ AUDREY MANIEZ: The technical requirements? Yeah, the tools have to be WCAG compliant, and they are not WCAG compliant. Tools like LMSs are really not taking accessibility into their roadmap. A few, a really few, tools do that. So that's it.
+ I made a little study last year on CS tools, and out of the over 30 LMS-type tools we found, just one listed an accessibility report. It's really a few tools. So those tools can't give accessible interfaces to students, and that's a big problem, most of all in universities.
+ JADE MATOS CAREW: Okay. Thank you. I am just trying to keep up with the chat here and check to see if there are any questions. Sorry, just bear with me for a moment.
+ Sarah, sorry, did you mention that you are leaving, or are you staying, before I direct any questions?
+SARAH LEWTHWAITE: I am afraid I am aware this session was running until 2:00 sorry, 2:00 local + time. I + appreciate it's different in Europe. So I only have a couple more minutes.
+JADE MATOS CAREW: Okay. I am just wondering, there was one other question here for a university + context. In a + university, you likely have the opportunity to integrate accessibility into other disciplines so engineering, + political science, lots of different things. Do we have any examples of how that's happened, where that's happened, + any success stories?
+ SARAH LEWTHWAITE: I mean, I think it is happening. I am not sure there's visibility. And I think that's one of the challenges we have as a field: just knowing where and how accessibility is being taught. So I am aware Kristen Shinohara at RIT did that survey of colleagues looking at the teaching of accessibility across the USA, and, you know, whether it's appearing in software engineering and other fields. I think there is a piece to be done about establishing where accessibility is taught, because at the moment you only really see the specialist departments publicizing online where it's being taught. So you will see that at some institutions in the UK, say at Dundee, at the Open University, at other leading institutes, but it's difficult to know where and how it is being taught.
+Of course, it is being embedded, so I would say look at what research is coming out about this, and I think there + is + a lot of work about teaching accessibly, you know, which I know the University of Washington have done a lot on in a + range of fields. So that is building up. But it's a difficult picture to assess, and I think if you are somebody + watching this and you have conduit to some of the professional organizations, there is that question of that raising + knowledge of where it's being done, how it's being done, and how it's being done well, but I am sorry I don't have + those answers. But I think in the next phase of my research, I am very interested in trying to look into that more + fully. I am going to have to step away, so thank you very much, everybody. +
+JADE MATOS CAREW: Thank you, Sarah.
+ AUDREY MANIEZ: In France, we have a degree at the University of La Reunion that is focused only on accessibility, to train accessibility managers. They are trained to create an accessibility culture inside organizations, to manage accessibility requirements, audits, trainings, et cetera. So that's a real degree; this is its first year. That's a really, really great project. Yes, we are really proud. And we can see some units of teaching about accessibility in some degrees at university. Really few, but you can find the word accessibility sometimes in degree programs. So that's a little shy, but yes, maybe that will come a little more in the future.
+ And I think that's linked with the job market's needs. As long as jobs do not require accessibility skills, universities won't train people on accessibility. I mean, we need to have a real need in the job market for these skills. Organizations have to put accessibility in job requirements so it can be a real skill to have, and I think a circle can start from that.
+ JADE MATOS CAREW: That's one of the ways that we are looking at this at the Open University, making sure that accessibility is visible in everything that we do. So if we are talking about accessibility, if we are hosting a presentation, that presentation needs to be accessible in itself. And I think this is really important for students to see as well, that accessibility is being prioritized and that it's visible in learning spaces. And I suppose that means it's a more holistic and informal approach to advocating for accessibility, raising awareness, and building those skills.
+ Shall we have a look at the chat and see if there's anything we haven't answered before we move away from this? Just seeing if there's anything I have missed. Thank you for all of your questions. There have been some really good ones.
+ There was a question which asks whether we could recommend any courses that teach the accessibility requirements outlined in EN 301 549 in plain language. I suppose we'll direct you to the links to some of our favorite resources that we posted right at the beginning of the chat, in particular the WAI resources from the W3C. Among those, there's an Accessibility Fundamentals course.
+AUDREY MANIEZ: Yeah, maybe not about the EN, but yeah.
+ JADE MATOS CAREW: Maybe not that standard in particular, but the reason it's beneficial, obviously, is because it references the most up-to-date materials.
+ Do you have anything that you'd like to recommend, Audrey?
+AUDREY MANIEZ: About the EN, no, I don't have anything, just the document itself. But no, I have nothing else to recommend.
+ JADE MATOS CAREW: Are there any other final questions, perhaps, that haven't been asked, or anything that I have missed that's relevant to our conversation about education and training?
+ Anything else?
+ What is your position, this is a good one that's just come in, what is your position on certifications such as those from the IAAP, the International Association of Accessibility Professionals? Audrey, that's a good one for you because you'll have a lot of familiarity with this.
++ AUDREY MANIEZ: With the certification? With IAAP, I don't have IAAP. My position is that it's not + required to be + a good professional. We have really good professionals that don't have a certification. But for some people, that + gives them a structure to a point, a goal. That can be great to have the certification. I don't know the content of + the certification, so I can't tell if it's a good one or not. But the concept, the thing is something good because + you have a certificate, you have proved you can do things, and that's great. + We, too, do some certification at Access42. We do training, so people have to do some things, we evaluate them, and + we give or not give certification. And that's great for some people to find a job because they can prove to their + employer they are capable of doing what is written on the certificate. +
+JADE MATOS CAREW: I agree. Actually, I think that it demonstrates a commitment to accessibility, a + professional + commitment to it. And from my experience with IAAP, the content of that exam is quite broad and wide ranging. And it + really enables somebody to focus on upskilling their knowledge in that area. So I think they are, on the whole, + positive.
+AUDREY MANIEZ: Okay.
++ JADE MATOS CAREW: I think they are still quite new, though, so we've yet to see the impact fully of + these + certifications. + I've just noticed that Sarah has dropped back in, into the meeting. Do you have anything to add on certifications in + your experience? +
+SARAH LEWTHWAITE: I am sorry, I may have a slightly noisy background, but I think the certifications are really important, for the reasons you've raised. I think the only challenge sometimes is, and stop me, Jade, if it is too noisy in the background...
+JADE MATOS CAREW: It's okay.
+SARAH LEWTHWAITE: ... is the cultural dimension, in that different territories have slightly different requirements. And sometimes sensitizing those kinds of certifications for, say, the UK or the U.S. or India is really important. And I think that's something the IAAP are doing, and that's really great.
+JADE MATOS CAREW: Agree. + Right. We'll close there and hand back over to Carlos. Thank you so much, Audrey and Sarah, for your time today and + for your answers. I've got a couple more links that I'd like to post in the chat just to a couple more places that + we compiled before we met today. And thank you for those who have posted links in the chat also and for your + questions. So handing over, Carlos. + +
+ CARLOS DUARTE: Thank you so much, Jade, and thank you also Sarah and Audrey. It was a really, really great discussion. It's great to see that there's also a lot of positive feedback coming into the chat.
+ We'll now have a ten-minute break, so we will be back at a quarter past the hour. Jade is still busy pushing more links into the chat, great. If any of the panelists would also like to answer some of the questions that they haven't been able to tackle live, they are still in the Q&A, so please do. And yeah, in ten minutes we'll be back for our second session, on mobile accessibility, so see you in a while.
+ CARLOS DUARTE: Okay. I think we are ready to begin our second session. I hope everyone enjoyed the break. Just another reminder: you can use the Q&A to pose any questions to the session participants, and we'll also be monitoring the chat, but we are mostly using that for any technical issues or for the session participants to share links to resources that are relevant.
+ For the second session, the topic will be mobile accessibility. Detlev, from DIAS in Germany, is going to moderate it. And our two panelists, André, from the Universidade Federal de Lavras in Brazil, and Paul, from Digital Accessibility (that's the English translation of the name) in the Netherlands, will be joining us for the next hour. You can take it away, Detlev.
+DETLEV FISCHER: Hello, and welcome. I am trying to emulate what Jade did, because it's always difficult for me to find the right format for introducing everyone. I was prepared to introduce the panelists, but I think it's probably better if they introduce themselves. Carlos has already given the names, so before we start with our topic, I would just like both of you to spend a minute saying who you are and what you are doing, and I'll add to that, and then we can start. Do you want to start, Paul?
+ PAUL VAN WORKUM: Yes, that's fine. I am Paul van Workum, and I have been working for a few years now in the field of app accessibility. I am one of the founders of the Appt Foundation, and we created a knowledge platform with a lot of information about app-specific things, like how assistive technologies work and how you can fix certain issues in certain code bases. So that's an interesting resource, I think.
+ Besides that, we have a company where we do everything around app accessibility, from training to testing to user testing, and we also help app organizations evolve in their maturity level. Besides that, I work at the Dutch Government, helping there with the most critical apps and doing, basically, supplier management: helping suppliers of apps to governments with becoming accessible. That's it. Andre?
+DETLEV FISCHER: Do you want to pick up, Andre, and say a few words about yourself?
+ ANDRE PIMENTA FREIRE: Yeah, sure. Hello, everyone. First of all, thanks to Letícia, Carlos and all the organizers, and thanks to Detlev and Paul for sharing the session. I think we'll have a very good time here sharing some lessons learned and challenges of evaluating the accessibility of mobile apps.
+ I am an assistant professor at the Federal University of Lavras, officially in the field of human-computer interaction. I also teach optional courses on accessibility. And we've done some research, among other things, on mobile accessibility, including evaluation. So I hope we can share a couple of lessons we've learned in looking at different issues in mobile accessibility evaluation: from technical work we've done jointly with colleagues on automated evaluation, to how to do manual auditing of mobile accessibility and work on different platforms, and even some more recent studies we've done at the policy level.
+ We may have a couple of particular issues in Brazil to share, which might be applicable to many other developing countries: having what's not such bad regulation and legislation on accessibility in general, but accessibility legislation covering digital accessibility that is now, I think, eight years old and that just left mobile accessibility out. And we have looked into how monitoring and enforcement have worked in that scenario. We have some recent advancements in Brazil. Reinaldo Ferraz is here; he has done a lot of work with the Brazilian national regulatory body to put out a new set of guidelines specifically for mobile accessibility at the end of 2022. So I hope we can share a couple of lessons, both from the technical side, going through to processes in universities, research agencies and companies, and what we've seen in policies in different countries, both from advanced legislation, such as in the European Union, and other countries that are kind of catching up with that. So, looking forward to very nice discussions here.
+ DETLEV FISCHER: Okay. Thank you both. I will just say a few words about me so you know who I am. I am the managing director of DIAS, which is a company specialized in accessibility testing and consulting. And I have been a member of the Accessibility Guidelines Working Group of the W3C for, I think, about 12 years now. So I am actively involved in shaping the new WCAG 3 standard, which is kind of a difficult, or challenging, thing. I've also been involved in policy consulting, in the sense that I have been a member of WADEX, the Web Accessibility Directive Expert Group, that helped the European Commission devise a monitoring scheme so that the Web Accessibility Directive can be monitored across the Member States. So that was interesting as well.
++ Yeah, that's my personal background. And I thought I'd start with a very, very quick run on, to give some context of + what we are discussing today. And that starts with the name of the session itself. It's called "Mobile + Accessibility." + That has been a bit under attack within the Working Group, the Web Accessibility Guidelines Working Group. Because + it's increasingly a misnomer. There was a time when it was perceived as separate; there were the mobile apps on the + smartphones, and there was the world of the desktop. But we increasingly see that apps are also run on a tablet, + which + has a keyboard, so there's keyboard accessibility issues there. And we also see that increasingly desktop + environments + have touch, for example, something which was not at least not common ten years ago or not available ten years ago. + So + those two worlds seem to be slowly growing together, and we cannot make assumptions anymore so clearly as we used to + do in the past. And I think the result is that the Working Group has moved away from calling out mobile + accessibility + to something which is more looking at different input modalities. So you basically have two different input + modalities. One is coordinate based, if you like, so that's your pointer. Whether you guide that with a mouse or + with + your finger on the touchscreen or whether you guide it with a grid when you are a speech input user, you can guide a + pointer, a virtual pointer. And the other thing is the traditional keyboard method or input modality, where you have + other assistive technologies which are based on that. For example, a switch user has some motor disabilities and + uses + a switch to interact with a webpage, for example, or an app, they would need a good keyboard accessibility to + operate + things. So that's the way things are going. So mobile accessibility as a term, I think, is probably on the way out. + But that doesn't mean that it's not interesting to discuss the particular issues we have. +
++ So one of the things we are faced with as evaluators is there are no specific or at least in the normative space, in + the space of European norms and standards and directives, there are no specific mobile guidelines. They are + basically + all derived from Web guidelines. They have just taken out six of the Web accessibility requirements and basically + put + the rest into Chapter 11 of the European norm, which is called Software, and we are now supposed to use those to + evaluate apps because they count as software. And obviously, there are some problems with that. And that's what we + are + probably talking about later on in more detail, at what points you see there are differences where the standard + requirement cannot easily be applied to apps, and what can we do about that? +
++ So there also have been some requirements which have been around in other recommendations for mobile accessibility + for + some time, which have only very recently become standard requirements. For example, WCAG 2.2 now has something + called + "target size" which gives us some idea of how big a target should be, and that has never been a standard requirement + before. But several recommendations for mobile apps and accessibility of apps have included that. And also the + framework of the big two operating system providers, Apple and Android, have their own guidelines which have things + like recommended touch target size, for example. +
++ So it's an interesting field. The thing is because of the difficulty of applying requirements which have been + written + for Web onto the mobile space, we also have the problem that in quite a few places, it's very difficult to apply + them + correctly. And you have a fairly high margin of error or of uncertainty where you think, you know, is this a + reliable + assessment? Especially in a context of conformance assessments, or if we are looking at it from a for example, from + a + monitoring perspective, where some public body needs to say this is accessible, you know, you have met all the + requirements, you know, is that easy to do? And how do we do that? +
++ So the big difference, of course, is apps normally come in a windowless environment. Usually they are in a + windowless + environment, not like the desktop. And they are often exclusively designed for touch. And we see that when we + evaluate + things, that most of the problems we find are in the area of linear access. For example, if I turn on a screen + reader + and want to traverse elements, things are not accessible. Or if I connect a keyboard, I don't have proper keyboard + accessibility. And that's simply because people think, well, this is for the mobile use, so you know, these are apps + on the smartphone. They are exclusively used by touch input. So the rest is not relevant. But the Standard says it + must be keyboard accessible. And also, if you have a screen reader turned on, then you have to have linear + accessibility, and you have to make sure that all the elements you encounter give you their proper name and give you + their role so that a blind user, for example, will know what to do with them. So that's the situation we are in. + And another important difference is that the apps are not open for coding inspection. So when we evaluate websites, + we + can turn on the developer tools, for example, and look at the source code of the page. And we can check things + there. + That's not available when we audit apps. Like in normal cases, I know that Paul has recommended as part of the app + procedure that the auditor inquires what kind of platform or what kind of developing environment has been used, and + that's certainly fine. But in our practice, when we normally audit apps, we don't have that information, simply. We + may get it, but even then it may be difficult for us to know what exactly technically needs to be done to make + things + accessible. Because there are so many different developing environments. So we don't have that openness of code. +
++ And + also, we have a much less extensive tool set, and that's also something Paul has indicated they have some ideas on + how + to support or to increase, improve the tool set for us to evaluate. We don't have, for example, the bookmarklets we + have in the website to check things. We don't have the developer tools where we can run plugins, giving us automated + tests. There's some of that, like there's an accessibility scanner at Android, and there may be some others which we + hear about, but it's a much less broad and powerful tool set we have at the moment. And that means that for testing, + we have a very strong reliance on using the screen reader to work out whether all the elements in the app have + proper + accessibility. So you turn it on, and then you can hear what accessible name is behind an element or whether it can + focus and, you know, whether it has the right role and whether you know, for example, as a blind user, what to + expect + and how it will behave. +
+ There's also another difference: the operating system is for apps what the browser is for the Web space. The accommodations you can make in the browser are not available, but there are other accommodations you can make as a disabled user at the operating system level. And there are a number of questions with that regarding conformance. Is it enough if you can, for example, improve the contrast of elements at the operating system level, and then you meet the requirements? Or is the author responsible? So we have this whole question of what is the operating system's responsibility and what is the author's responsibility in this field, which we'll get back to, I think. So I think that's probably enough for now.
++ Just to maybe open with the participant questions, we had a number of + participant questions that were already given to us before the meeting, and I just mentioned them briefly, the + topics + that we had. One is around available tool sets, and that's certainly something that maybe both Andre and Paul have + something to contribute. There's also something about testing guidance, you know, how do we know how to test, when + is + something okay, when is something not okay? There's a scarcity of information on that, I think. + The next is, you know, there are different platform service capabilities with apps. You may not always be able to + meet + the requirements of the European norm, for example, depending on how you interpret it. So how do we deal with that? + Another topic that was raised that we will cover, I hope, is Web views. Web views means you have an app which is a + native app, but within that app, you have areas, you have views where people just pull in information from the Web, + and that often creates difficulties because you suddenly have a break in the navigation. You may not be able to + focus + those Web views easily or get out of them easily. They may behave quite differently or may even carry a different + navigation compared to your native app. So that's an interesting question, how do we deal with that? + And there was one specific question on reflow, which we may also cover. What is the requirement for reflow? That + means + that, for example, if you zoom in on in a Web browser, you get a different design, which is also used on a mobile + phone with a little hamburger icon often for the navigation. You know, what does this requirement that content can + reflow when you zoom in, what does that mean for a platform which doesn't have a zoom in most of the time, where the + magnification of text normally happens in the operating system by turning on the zoom accessibility function? So + those + are the four questions I just briefly report. +
++ Maybe we start with the tool sets because I think several questions honed in on that. What are the ways or are there + good tool sets to help us in this difficult evaluation task of mobile apps? Does one of you want to pick that + question + up? +
+ +PAUL VAN WORKUM: Yeah, I would be willing to start.
+DETLEV FISCHER: Mm hmm.
+PAUL VAN WORKUM: I think that there are a few levels. One is the process, like how to approach the testing. In the Netherlands, each government body is responsible for making a full report, based on WCAG-EM, for each website and each app. But for apps, it's quite challenging to have a WCAG-EM evaluation produce the same results across all auditing firms. That's because some firms are trying to find the URLs, and then they find one URL that's just the App Store or Google Play Store link, and there is no way of, let's say, cutting it into pieces and making a sample, because there are no URLs available. So what we try to do with the process is to identify an app itself differently than a website. So you probably need the version number, for example, because you can't download it like you can with a website.
+Secondly, a screen would be a good alternative for a page. So if you have a lot of screens identified as your scope, you are able to make a sample. Certain things like that we wrote differently in the Appt Evaluation Method. That's basically for the process: you need a different process to test, and we identified some key differences. We did it in an assignment for the Dutch Government, and it was quite a small project. But I think it would be very interesting to see how you can make that kind of evaluation method comparable to WCAG-EM, which a lot of companies are using, and it could be the next standard. Because we see in the Netherlands that auditing firms are doing it differently, because there's no single evaluation method described in such a way that each company does it in the same way. So I think it's an interesting source, and it's the best that we've found. But I think there are a lot of things still to do here on this process.
+ And then, of course, you have the interpretation of WCAG or the EN standard. You already gave a few examples, and yeah, there are so many things going on there. And some things you can't test.
+ Maybe I will give some examples. Everyone knows that on a website the language of the page should be identified, and also the language of parts of the page, let's say of a particular element. For software, and a mobile app is software, it means that the language of the software should be identified. So let's say that I have a Dutch app and I identify the language as Dutch, meaning that I am complying with the EN standard. But the second criterion, 3.1.2, language of parts: I could have everything read out in Chinese. So when I am testing, my screen reader reads out everything wrong because it's not using the Dutch language, but at the scale of the app the language is set correctly, so I am not able to test it. And I am also not able to fail them on this criterion, because they did set the language of the app correctly. This is one example.
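+ To make Paul's example concrete, here is a minimal Kotlin sketch, assuming an Android View-based app (the strings and the function name are illustrative, not from the discussion), of how a developer would mark the language of one part of the text so a screen reader can switch its speech language for just that phrase:

```kotlin
import android.text.SpannableString
import android.text.Spanned
import android.text.style.LocaleSpan
import android.widget.TextView
import java.util.Locale

// The app as a whole is declared as Dutch; one quoted phrase is Chinese.
// A LocaleSpan over just that phrase gives the screen reader the hint it
// needs to switch text-to-speech language for that part (WCAG 3.1.2).
fun bindQuote(textView: TextView) {
    val dutchIntro = "Zoals het gezegde luidt: "
    val chineseQuote = "千里之行，始于足下"
    val spannable = SpannableString(dutchIntro + chineseQuote)
    spannable.setSpan(
        LocaleSpan(Locale.CHINESE),
        dutchIntro.length,
        dutchIntro.length + chineseQuote.length,
        Spanned.SPAN_EXCLUSIVE_EXCLUSIVE
    )
    textView.text = spannable
}
```

+ Whether such a span is present is exactly the kind of detail an auditor cannot see without access to the code, which is Paul's point about testability.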
+I can give you another. The second one is even funnier. We have an automated testing solution, which we are already using ourselves. What we see there is that companies are adding the role to the name. So in the name field they add, let's say, "login button"; that's the name, but the role is empty. As auditing firms, if you can't go into the source code, you can't know what they programmed, so you are dependent on the screen reader. And the screen reader reads out, in both cases, login, comma, button. And of course, sometimes I notice it's not a comma but a dot or two spaces, and I think, hmm, probably they are cheating on me. But this is what happens. This is what happens. Yeah. These are, I think, the two things that, at least in the Netherlands, we are trying to figure out how to deal with. And it's not clear yet.
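+ As an illustration of the anti-pattern Paul describes, a minimal Kotlin sketch, assuming a classic Android View-based screen (the function and view names are hypothetical):

```kotlin
import android.widget.Button
import android.widget.ImageView

// Anti-pattern: a plain ImageView acts as a control, and "button" is
// stuffed into the accessible name. A screen reader announces something
// like "Login button", but no role is exposed programmatically.
fun bindLoginBadly(icon: ImageView, onLogin: () -> Unit) {
    icon.contentDescription = "Login button"
    icon.setOnClickListener { onLogin() }
}

// Fix: use a real Button (or expose the proper role) and keep the name
// clean. TalkBack then appends the role on its own: "Login, button".
fun bindLoginProperly(button: Button, onLogin: () -> Unit) {
    button.text = "Login"
    button.setOnClickListener { onLogin() }
}
```

+ Heard through a screen reader, both versions announce almost the same thing, which is exactly why an auditor without access to the code struggles to tell them apart.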
+DETLEV FISCHER: I noticed that most resources you find, I mean, what you have mentioned, WCAG-EM, that's the Website Accessibility Conformance Evaluation Methodology, which was developed a number of years ago. It defines certain steps that you need to take, for example, to set up a representative sample of web pages and so on, and what you can and what you cannot exclude. And all of that, I think, can be used on apps in a similar way. But all of that does not give you anything on how you actually evaluate particular success criteria. So there's nothing in WCAG-EM at all about what reflow means, or what resize text means, for an app. So I think at that point it becomes more interesting to see, okay, what are the particular testing processes or testing procedures that exist for mobile apps? My suspicion is that companies doing this may often not be willing to detail the exact method they use. So you end up with these very general statements in the European norm or in WCAG and then have to wonder and scratch your head: what does it actually mean? How do you work it out? How would you, Andre, tackle that? Are there test procedures you use for auditing mobile apps? Which ones are you using? How do you see that situation?
++ ANDRE PIMENTA FREIRE: Thanks for the question, Detlev. I think it's a very relevant issue. + How we deal with the guidelines, how we deal with the testing procedures when there's so little defined, like ground + rules, well established procedures as we have for the Web. + What I have noticed, looking from the research perspective, is that many companies have stepped in and defined their + sets of guidelines for native apps, their sets of testing procedures, so we've come across a couple of guidelines + sets and testing procedures from companies. + +
+ So the BBC have defined their set of guidelines with a couple of indications of how to test them. We followed the work of some of our colleagues in Brazil who worked to define a set of guidelines for native apps and how to test them. So many companies are stepping in and defining their own sets of procedures. And in a research study with practitioners, we found, for example, that they found it easier to test some of the BBC's guidelines, with their well defined testing procedures, than to map the WCAG guidelines onto mobile apps, which still don't have a lot of the sufficient and advisory techniques attached to them, which is where you find the well defined testing procedures. They found that easier: having well defined procedures to test specific guidelines.
+ So I think this is still an open issue, and we have to work on it. But in practical terms, I think it's interesting to look around at what specific companies have done to try and approach that while we don't have the well defined procedures.
++ And in terms of the tools, that's not something I have done research on specifically, but I have collaborated with some colleagues, and there are a lot of very interesting challenges in native mobile apps, as you mentioned. We don't have pages, so what screens should we check and evaluate when we are doing our sampling? That's very challenging to choose, and we have to find specific rules and guidance on how to do that. On the other hand, what I have noticed from some collaboration I have done with colleagues coming from software engineering, from testing, is that when they are doing accessibility testing for native mobile apps, they bring in techniques from the different testing approaches they already have, similar to, as we mentioned, Accessibility Scanner. But in research, some researchers have already advanced on that and tried to exploit techniques that can simulate the different interactions you have with interface components. Maybe in the future, by exploring more of the accessibility APIs to dig into that information, and although we will also have a lot of difficulties compared to the Web world, we could have some advantages by bringing in techniques that were more challenging to employ on the Web, along with a lot of the advancements we have had in software engineering and software testing, to mobile accessibility testing. We could have some new approaches we haven't seen in automated Web evaluation tools either. So I see that it's a field with a lot of open questions: how to sample, and how do we have well defined rules to test, to evaluate, and to report on those issues? From the research perspective, I also see that we have a lot of good opportunities.
++ In the Brazilian context, as Paul was mentioning what's going on with the European standard using the software guidelines and trying to map them onto mobile accessibility, Brazil has been in kind of a funny situation. Our current law for accessibility in general is from 2015, and it broadened the scope to public and private organizations, whereas the previous law was very limited; it only encompassed the federal government and the executive power. It's much broader now. However, the law says that all private and public organizations should have accessible websites, which doesn't include mobile apps. So we are kind of in a gray area in the country. On the other hand, as I mentioned earlier, I will post the link there; I am sorry, for those who want it, this is in Portuguese, I think we only have the title in English. But as I mentioned, Reinaldo Ferraz, here with us, was working very closely on that working group. We have a very specific set of guidelines for native mobile apps. But it's only one year old; it was released at the end of 2022. And I haven't seen a lot of work in the country in terms of defining a set of procedures for evaluating against it. As I mentioned, the law doesn't point to that standard, so there's no enforcement activity in that sense.
++ So I think we might be in for a lot of work in terms of having specific evaluation tools and procedures for that, but at the moment, as I mentioned, it's still very early days. It was published only a year ago, so there are many people getting acquainted with the new standard. But still, I agree with a lot of what Paul and the others discussed: there's a lot to do on reporting procedures, standards, and working on automated evaluation tools. But again, we might have to look at it in a different way, looking at the opportunities we have from the different ways of working with it, but also seeing the differences we have from the Web world, and there is more work that I think we are going to see in future versions of WCAG and other standards as well.
+DETLEV FISCHER: Paul, can you add to that regarding the automated opportunities? I think you + mentioned that Abra + is also working on some automated solutions. What can it do, and what are the opportunities, in your view?
+PAUL VAN WORKUM: Maybe I want to react to that as well. What I see is, of course, two things. One is that, in the Netherlands, I have only been working in the accessibility field for three years, and I am spending, together with the people around me, around 80%, 90% of my time on product development. So we are really digging into certain things. That's why we were able to gain so much knowledge in such a short time.
+But what I see is that there are some experts from the Web stating to me that apps are the same as websites. And what I find really interesting is that there are a lot of experts who know that in Web accessibility you should look at all the small details. In apps, that's quite frustrating, because if you are looking at the details, you can't go to the source code, you don't know whether the test rules are working, and so you have to interpret things. And that basically means two things. One, apps are generally shitty, so there are a lot of improvements that can be done. So let's first try to make big steps for users, because I think that's why I am in this field: I want to help users by helping apps. And if you don't know something, think about the principles, like: is it operable for everyone, for example with a keyboard, and is it a really big issue if it's broken? That way, I think, you can quite easily make a big step. And then, of course, in the details, it gets complicated, and there is also a lot of discussion, because people interpret things differently. But I think first make big steps on name, role, value, on font sizes, on contrast, on labels, on images. I think if you do that, you already make a really big impact for a lot of users, and that's not that complicated. It's quite clear what you need to do there.
++ So start with the user, I think that's a really good approach, and fall back on the principles if you don't know the exact test rule. It's different from the Web, because the Web is already at a level of detail where, as an expert, you can't do that anymore. I think with apps, finding the issue is not the problem; most of the time, fixing the issue is the problem. Because you are fixing the issue not only in native iOS and native Android but also in different frameworks, cross platform frameworks, and there are a lot of solutions, and some frameworks are not able to do certain things. So what we see is that the Web is a kind of markup language, so you can always fall back on HTML. With an app, you have a programming language, and it is what it is. And if you have an issue with your keyboard, it can mean that you should start over and build a new app in a different programming language. And my question to the government sometimes is: on one side, you want it to be fully accessible; on the other side, they've invested a few million euros in a certain programming language. It's not that there is one module or one piece or one element that you have to change. You have to rebuild the app from scratch, probably hire a completely new team of developers. So I think that's, for apps, also quite a big challenge.
++ And I think we should make accessibility a bit more fun, at least for people who are just getting in touch with it. So, automated testing, that was the question from Detlev. I think automated testing is not a solution for everything, but it is a solution: you can make a big step with the basics with a lot of developers in the teams. Because if everyone knows, when they develop something, that the basics are going wrong, we can make a big step in accessibility. What we, and I don't want to do a big promotion, but what we are trying to do and what we are going to do in January, I am not allowed to make deadlines for my team, but basically, we are going to launch a tool that can test the first screen of an app for free. So you can just put in the URL, and you get a report on the first screen. And if you want to test more screens, you can do it in the cloud, and it will be a very attractive
+DETLEV FISCHER: Is that for, because you mentioned a URL, is that for the Web or for apps?
++ PAUL VAN WORKUM: We only do apps, but you add the URL of the app in the App Store or Play Store, and we will do the rest. So that's one. Secondly, we will have a kind of debugger or accessibility inspector, and it will be able to take all the screens apart. And you can inspect the name, the role, the value, whether elements are focusable. We are investigating whether we can do better with contrast, because with the contrast check, if you take a screenshot and send it to your desktop, there's between a 1% and 10% change in the color code, meaning that if we measure a contrast of 4.1, it could really be 4.5, and the check could be a bust. So how can you deal with this? Do you only fail it when it's below 4.1, or only fail when it's below 5, because then potentially it could be in the danger zone? A lot of questions. But what we can do with automated testing is find the name, role, value; we can find contrast issues, target size issues, whether labels are added, whether decorative images get focus, and text sizes. When we look at apps and we see an app that didn't do anything yet, I think out of 100 issues, 80 issues are name, role, value, text sizes, contrast, stuff like that. So you can make a really big step using this automated testing. Of course, if an organization is further along, then the issues get more complicated, and then you can find maybe 10% or 20% of the amount of issues. So it's not fair to say that we can find 80%; I would not claim that. But we can find quite a lot of the issues that are occurring in apps.
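+To make the contrast discussion concrete, here is a minimal Kotlin sketch of the WCAG contrast ratio calculation with an illustrative tolerance band for the screenshot color drift Paul describes. The margin value and colors are made up for illustration; this is not taken from any particular tool.
+
+```kotlin
+import kotlin.math.pow
+
+// WCAG relative luminance for an sRGB color given as 0-255 channels.
+fun relativeLuminance(r: Int, g: Int, b: Int): Double {
+    fun channel(c: Int): Double {
+        val s = c / 255.0
+        return if (s <= 0.03928) s / 12.92 else ((s + 0.055) / 1.055).pow(2.4)
+    }
+    return 0.2126 * channel(r) + 0.7152 * channel(g) + 0.0722 * channel(b)
+}
+
+// WCAG contrast ratio between a foreground and a background color.
+fun contrastRatio(fg: Triple<Int, Int, Int>, bg: Triple<Int, Int, Int>): Double {
+    val l1 = relativeLuminance(fg.first, fg.second, fg.third)
+    val l2 = relativeLuminance(bg.first, bg.second, bg.third)
+    return (maxOf(l1, l2) + 0.05) / (minOf(l1, l2) + 0.05)
+}
+
+fun main() {
+    val ratio = contrastRatio(Triple(118, 118, 118), Triple(255, 255, 255))
+    val margin = 0.3 // illustrative allowance for colors sampled from screenshots
+    val verdict = when {
+        ratio >= 4.5 + margin -> "pass"
+        ratio < 4.5 - margin -> "fail"
+        else -> "borderline, verify against the source colors"
+    }
+    println("%.2f:1 -> %s".format(ratio, verdict))
+}
+```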
++ DETLEV FISCHER: That would be a useful tool to have and make testing probably easier on those + things. + I just just to get back to the question of how to apply certain WCAG criteria, would like to give an example and + lead to a question which is about the responsibility of the author versus responsibility of the operating system and + the settings the user can make there. + For example, text size. Right? In the Web, you have ways of increasing the text size, for example, by most commonly + by zooming in. You zoom into your browser. The text gets bigger. You have a reflow. At some point it's just one + column of text. And you can then check, okay, does it increase text to 200%? + In apps, that usually doesn't exist. Of course, it's a possibility that the app has something like increased text + size, but most apps don't have that. And the common understanding of most practitioners evaluating apps regarding + text size is, well, we actually don't need to have a look at that. We don't need to look at that at all because + there's a zoom function in the Accessibility settings. So you can just turn on zoom, and then you can zoom up to + 500% or even larger by now, 500% I think is the latest I remember. And that will give you enough text. But + obviously, that also means that you don't reflow, so you get larger text, but you have to pan your screen + horizontally to be able to read the lines. Right? Because you don't have a reflow, you have to do that. So reading + gets a lot more difficult for people with visual impairments because they all the time have to pan in order to read + the text if it does not reflow. +
++ So the upshot is, okay, how do you decide, as an evaluator, whether that is good enough, as many people say, or whether you also check that the text in the app changes its size if you change settings in the operating system? You can increase the text size in your operating system's accessibility settings. You can say large text, and if it is implemented well, then the text in your app should get larger. Since you can do this as an app developer, is this something that you also require, so that if you don't do it, this point fails? Or is it something on top, where you say, well, this passes anyway because we have the zoom function at the operating system level? That's my question to you. I mean, how would you deal with those questions in evaluation? What's your line on authors' responsibilities versus operating system level capabilities? Does anyone want to pick up that question? Paul?
+PAUL VAN WORKUM: Yeah, I can give a reaction. I think on my bookshelf there is a dictionary. And the question is: because I have this dictionary, is it then fine to use very complicated words? It's the same with AI. Maybe AI gives the possibility of getting feedback on how the screen is set up and what kind of icons are being used. And is that enough? That's the same as the dictionary on my shelf: is it enough? And that's the thing. What we also see from the guidelines, what we see for the Web, is that if you have a high contrast mode or big font size possibilities, then of course, if I turn the high font size on, my letters are big enough that you don't need to meet certain contrast criteria anymore. I think it's a discussion that you don't want to start.
++ DETLEV FISCHER: I started it. + (Laughter) +
++ PAUL VAN WORKUM: Yes, you have. I will give you my answer. I think that an app, without changing the settings to high contrast mode or a bigger font size, should work for, like, the average user. And the users that need a bigger font size should be able, in the system settings, to set it to at least 200%. Then all the text in the app should scale. Not necessarily scale to exactly 200%, because in our automated testing tool we found out that body text scales to 200%, but text that is already big, at the highest levels, maybe only scales to 150%. So text should scale to 200%, but then in the settings you should probably set it to 300% or 350%, and we see that headings only scale to 190%, so it's hard to test. What we do is set it at 200%, and all text should be visible; each letter should be inside the box of the buttons. Meaning that you should check: does the text scale, yes or no; and if it scales, are all letters visible? That's basically the simplification. But with apps, in the title bar, in the tab bar, if you scale the text, you get issues again. But then there is a solution with long press, where you have an alternative: press it a bit longer, and then it's shown enlarged as well. I think that's what you should do as a developer for large text. And also with the contrast, I think that every user that is using your app, maybe seeing really well, if they walk outside and it's sunny, still needs a certain amount of contrast. And you don't want to go to your settings to make it a little bit bigger, a little bit more contrast. No. By default, your app should be usable for users. So that's why we do it in this way. But maybe, Andre, you do it differently. I am very curious.
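+As a small illustration of the scaling behavior discussed here, a Kotlin sketch for Android follows; the function names are illustrative. Text sized in SP units follows the user's system font size setting, and a test harness could read the current font scale to compare layouts at the default size and at 200%.
+
+```kotlin
+import android.content.Context
+import android.util.TypedValue
+import android.widget.TextView
+
+// 16sp body text: with the system font size set to 200%, this renders at
+// roughly twice the default size. Sizes given in DP or PX would not scale.
+fun applyScalableBodyText(textView: TextView) {
+    textView.setTextSize(TypedValue.COMPLEX_UNIT_SP, 16f)
+}
+
+// 1.0f is the default; larger values reflect the user's font size setting.
+// A test could render a screen at 1.0f and again at 2.0f and check that
+// every label is still fully visible inside its container.
+fun currentFontScale(context: Context): Float =
+    context.resources.configuration.fontScale
+```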
+ANDRE PIMENTA FREIRE: I totally agree with you, Paul. And from another perspective, when we look into what kind of devices people use, not everyone can afford, let's take an example, the iPhone. We've seen that a lot, and many people have asked, in many research studies, why have you not invited iPhone users to your usability studies on accessibility? And we have actually done a few of them, because we have very few iPhone users in countries like Brazil. Some research studies have shown that more than 85% of the people surveyed used Android phones, and even within the Android world, you have a lot of variability in the type of devices people use and in the devices' capabilities, models, et cetera. So relying on the device may be very tricky. So I totally agree with you, Paul: we should definitely try and look to provide the resources people need, even if they have devices that wouldn't provide more features to do that on their own.
+As Paul mentioned, there could be people who don't use assistive technologies every day, maybe because they are outside and it's sunny. Or, as we've seen, particularly here, we have a recent case at our university of an older student who is gradually losing his sight. He can still see a little, but he was not used to using assistive technologies from an early age, so now he is struggling to cope with different settings and to learn different things. So if he has to do a lot of work in terms of learning how to use assistive technologies on his mobile phone, that's not easy. So I take from this example that if we can provide it, especially if it's covered in standards that are directed by regulation, I don't see why not to do it. I think it's good that we have devices that provide good resources, that have good assistive technologies, but they are not always available, and people are not always able to use them as we would think. So I totally agree with that, Paul.
+DETLEV FISCHER: Yeah, I think it makes a clear case for needing more specific advice on how to + apply the success + criteria or EN requirements. For example, for the resize of text, we mentioned there are different ways of doing it. + There's zoom. There's also the accessibility settings for larger text. But it does not differentiate between body + text, it does not say anything about text that's already large. It does also not say anything about, say, labels in + a tab bar which could not grow by the same amount because they would break up or they would need to be truncated. So + those issues, they exist. And if you apply WCAG requirements by the letter at the moment, which does not + differentiate between types of text, it just says everything has to be 200% larger, then you end up with + recommendations for developers which may not be useful, which may actually make things worse. You know? And the + thing you mentioned, Paul, that you could have a pop up text, which is larger, is a nice way out. It's not really + something that, I think, has been foreseen in this, and it would not really be clear if that would be meeting the + requirements because it requires an extra step to bring up that enlarged text. But it's certainly something that + would be more, you know, more germane to the app environment. And there are many cases
+PAUL VAN WORKUM: Yeah, I think the problem is that if you say text should be enlarged in the tab bar, then you get dots and you can't see it as well. The best alternative, what we see as well, is landscape mode; it's quite often used in combination with larger font sizes, because then you have three sentences with a lot of words instead of only reading two words in each row, which is very tiring to read. Yeah, what we see is that we cannot fail on bigger font size if the solution is breaking up other things as well. You cannot say to the developer, in my opinion, you should do this, but if you do this, I will fail you on something else. So, best practice: long press implemented. But in audits, are you able to fail on this? Because if you fail on it, and you have five tabs, sometimes the tab bar gets three or four lines high, meaning that you don't have space for your content anymore. Or if you have space, you can't keep any overview anymore, especially with a bigger font size on. Yeah.
+I have maybe another one. It's with lists. I think it's one of the questions from users as well: basically, in an app, if you present a list with dots, like bullets, it doesn't read out the same way as on a website, where it says it's a list, one out of four or one out of five points. One of my cofounders made the Dutch COVID app accessible; he was the lead accessibility developer there. And there was an auditing firm that did a lot of Web audits stating that the list was a fail. And because it was very important for the Dutch government to make the app fully comply, he built something so that it was read out as a list, one out of four. But it means that if you have it in 15 languages, it's a lot of translation and strings, and it took a really long development time. At a certain moment, the auditing firms in the Netherlands said: if you can separately tap each item, it's also good enough. So we are organized in an inspection group, and we agreed that we deal with lists in this way.
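+For readers wondering what exposing list semantics natively can look like, here is a minimal Android sketch in Kotlin, assuming a hand-rolled list of item views (RecyclerView already reports this for you); the names are illustrative, and this is not the implementation used in the Dutch COVID app. With collection info exposed, the screen reader can announce the position itself, without the label text having to be translated into every language.
+
+```kotlin
+import android.view.View
+import androidx.core.view.AccessibilityDelegateCompat
+import androidx.core.view.ViewCompat
+import androidx.core.view.accessibility.AccessibilityNodeInfoCompat
+import androidx.core.view.accessibility.AccessibilityNodeInfoCompat.CollectionInfoCompat
+import androidx.core.view.accessibility.AccessibilityNodeInfoCompat.CollectionItemInfoCompat
+
+// Tell TalkBack how many items the list has and which position each item
+// occupies, so it can announce something like "item 2 of 4".
+fun exposeListSemantics(listContainer: View, itemViews: List<View>) {
+    ViewCompat.setAccessibilityDelegate(listContainer, object : AccessibilityDelegateCompat() {
+        override fun onInitializeAccessibilityNodeInfo(host: View, info: AccessibilityNodeInfoCompat) {
+            super.onInitializeAccessibilityNodeInfo(host, info)
+            // rows = number of items, one column, not hierarchical
+            info.setCollectionInfo(CollectionInfoCompat.obtain(itemViews.size, 1, false))
+        }
+    })
+    itemViews.forEachIndexed { index, item ->
+        ViewCompat.setAccessibilityDelegate(item, object : AccessibilityDelegateCompat() {
+            override fun onInitializeAccessibilityNodeInfo(host: View, info: AccessibilityNodeInfoCompat) {
+                super.onInitializeAccessibilityNodeInfo(host, info)
+                // row index, row span, column index, column span, not a heading
+                info.setCollectionItemInfo(CollectionItemInfoCompat.obtain(index, 1, 0, 1, false))
+            }
+        })
+    }
+}
+```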
+DETLEV FISCHER: Yeah.
+PAUL VAN WORKUM: But now we go to the European Union, and in other countries they do it differently. I think we should do something about that, at a European level or, I don't know, somewhere, so that everyone does it the same way, because otherwise it's unfair that some countries, or some auditing firms, fail it and others don't. And we have a lot of discussions in the Netherlands because some auditing firms fail apps on keyboard controls or on the focus indicator not having enough contrast, and others do not, because they say it's standard; we can't fail on this.
++ DETLEV FISCHER: Yeah, there's also a lot of leeway and a lot of wiggle room in terms of interpreting the requirements, you know, so you can arrive at different results. We have a number of questions from the audience. Maybe I should pick some and throw them to you and see whether you want to answer them. One is: what is your setup for mobile testing? Do you use multiple phones or a mobile emulator? Does one of you want to answer that? Do you use several devices?
+ANDRE PIMENTA FREIRE: We've done a couple of studies with different devices, but I mean, it's very + hard to have + all sorts of different settings. So I mean, in many situations we've seen, and from what we heard from a couple of + developers and testers, people tend to use emulators to have different settings, sizes, different OS versions, so I + think emulators can really come in handy.
+DETLEV FISCHER: Okay. And there's another question, which is: do you know of tools that emulate apps on a desktop so that zoom text users can test apps more easily?
+PAUL VAN WORKUM: I don't use emulators at all. When I do an audit, I use one device, and I test only Android or only iOS. And I report, for each issue, on which screen it is, I add a description of the problem, and I add a screenshot, because then we see that developers can identify very fast what the issue is. And we add the criterion, meaning that if you link it to the Appt platform, there's also the code base where you can find the solution to the problem. So you can see a heading, and with the screen reader with subtitles on, it says login, but not login, comma, heading; we say this is wrong, it's 1.3.1, look at the site for a solution. So in a meeting of one hour I can normally describe around 60 issues, because it's: look at the image, this is the heading. Next, is this a button? Insufficient contrast. Okay. Next. That, I think, is a very important combination, to have visual information there. Especially in apps, because with a website, you can download the website and present it later. But in an app, if you update the version, you can't reach the old version anymore, and basically it could be an issue that only occurred after an update, so you never know anymore if you did it well or not.
+DETLEV FISCHER: Yeah, well, I think, in my experience, there are differences between devices, + especially with + Android tests, so if you test something on a Pixel phone and you test something on a Samsung phone or some tablet, + you may get different readouts on certain items. So there are differences, and it may be useful to cover those. But + that's, in our experience, also down to the customer who wants to say, you know, please test this also on this + device, on this Android skin, for example, because this is one which we know has a large market share. And we want + this covered. + But regarding sharing the screen, we have done internally, we have done quite a few app tests where the blind tester + and the sighted assistant have been working together and sharing the screen via Zoom. So the blind tester shares his + screen, and at the same time, the assistant has the same app open on their device so they can also use it + independently to verify what the blind tester does. So that has turned out to be quite useful. And also, the blind + tester can share the screen reader output, so the assistant can also hear what's being output. So that is a setting + which has been quite useful. But it is time consuming. So be warned if you do that kind of testing, it is quite time + consuming. + I don't know how we are on time. We are now 16:25. Do we have more time, or are we +
+CARLOS DUARTE: No.
+DETLEV FISCHER: Okay, then. We have many, many questions. I am very sorry that we haven't covered more of them. There are many questions, and I hope I can answer some, or maybe the other participants in the panel can answer some of them, in the Question and Answer panel. And thank you very, very much for your insights. I think it's been an extremely useful discussion, and there's so much more; we have just scratched the surface, and there's a lot more to talk about. But this is all we could squeeze in. So I hope it was useful for you.
++ CARLOS DUARTE: Thank you so much, Detlev, Paul, and Andre. It was definitely really interesting. There are still a lot of open questions there in the Q&A, so if some of you can tackle those, it will be very good for everyone involved. It was really insightful and full of actionable material, I would say, so thank you so much for your contribution. And now let's have another ten minute break, and we'll be back at 16:35, so 5 minutes past the bottom of the hour, for our final session on Artificial Intelligence for Accessibility Evaluation. So see you in ten minutes.
+ ++ CARLOS DUARTE: Okay. So I think we are ready to start our last panel. The topic for this panel + session will be AI + for Accessibility Evaluation, and it's going to be moderated by Matthew Atkinson from Samsung R&D, in the UK. And + our participants will be Yeliz from the Middle East Technical University in Turkiye and Alain from SIP in + Luxembourg. + Once again just a quick reminder for any attendee that has joined in the meantime, we are using Q&A for posing + questions that you might have to the panelists or to the people in the session. And we are using chat to share any + resources linked to topics being discussed or for any technical issues that you might have. + So Matthew, you can take it over. +
++ MATTHEW ATKINSON: Hi, everyone. Let me just juggle my windows slightly, first of all. Just one second. Okay. So we're very excited to have this chat. It's a privilege to be here, and I welcome everyone. Thanks for your attendance, and thanks to the insightful panels that have gone before us, where this topic has actually come up; we'll try and give you our take on those questions. So, how this is going to work: we are each going to introduce ourselves and speak for a couple of minutes just to set out our experiences. And you will see there's a lot in common between the three of us in terms of threads, parallel threads. So what we'll do is we'll do that, and then we'll move into general topics of discussion. Of course, there are some questions we already got from the audience, which we've looked at, and as we can, we will answer things that come up during the session.
+So I'll begin. Hello again. I'm Matthew. I am Head of Web Standards at Samsung R&D Institute, UK. However, just to be clear, I am not here representing Samsung. I am also co chair of the W3C's Accessible Platform Architectures Working Group, which I will call APA from now on. One of our main jobs is to review W3C's specifications for accessibility, but we also do original research of our own. And whilst I am not speaking on behalf of APA either, we do a lot of research in this area, particularly in our Research Question Task Force; we have a lot of experts in that task force who look at the trends in this area. So I will relay some of my experience and some of theirs. So what follows is my personal opinions based on experience, some experience of accessibility auditing and a little of academia as well.
++ So one thing I wanted to do first of all is just distinguish between AI or machine learning and some of the current + automated evaluation that we can do. As other people have mentioned, actually, in previous panels, there are + automated accessibility evaluation tools, and they just use standard sort of heuristics. And they can capture around + 30% of the sorts of problems that the Web Content Accessibility Guidelines, or WCAG, identifies. So they don't + capture the majority of the problems, but they can give you a good barometer, a rough estimate of accessibility, and + they can be run in an automated way. But they don't use machine learning. So we are not talking about those. We are + talking about more recent developments. + And on machine learning, you'll notice that we'll talk about risks and opportunities, and we'll also talk about + mitigations. And I am just going to highlight one or two of each of those just now. And we'll revisit these as we go + through. +
++ So there's a concept from the literature called "burden shifting" or "shifting the burden." And a good example of + this is, for example, automated captions that are generated on, say, videos, streaming videos. And whilst they can + be useful, they are not necessarily 100% reliable, or they might be very reliable, but they are not 100%. And there + are some risks presented by that because if you are somebody who can't hear what's being said in the video and you + are relying on the captions to be accurate, then the burden for verifying the accuracy of the captions has been + shifted onto the person who is least able to do so. So that's one of the big risks. And there are others that we'll + talk about as well. Alain has some great examples of those. + There are some opportunities, though, because there are some things that machines can do better than humans, and + with some guidance, could present great opportunities. And Yeliz has some really good research that she'll share + with you on that front when it comes to accessibility evaluation. +
++ And in terms of mitigations, I just wanted to put two links in, which I will copy into the chat whilst I'm talking, + and these are two W3C efforts trying to help in this area. So I am just going to paste these in, and I will tell you + what they are. There's the W3C's Principles of Ethical Machine Learning, which is an official W3C deliverable, which + is being worked on. And then there is also a community group, which isn't official W3C work, but it's something that + is being incubated. This community group is called Accessibility at the Edge, and one of the things they are trying + to do is gather consensus on where we can and where we might not find machine learning to be helpful. So anybody can + join that community group. You don't need to be a member of W3C in the sense of being a paid member or a member + organization. You only need a free W3C account. So we welcome feedback on these efforts. + Okay. So that's definitely enough from me. So I will hand over, first of all, to Yeliz to give your sort of + introductory statement. +
++ YELIZ YESILADA: Hello, everybody. Good afternoon. Thank you, Matthew. First of all, thanks for + inviting me here. + It's been great. It's really great to see the first two sessions. I really enjoyed them myself. Especially mobile + accessibility one. I remember it was in 2009 that we actually created that document in the Education and Outreach + Working Group talking about the common experiences between mobile users and disabled users. So it's really + interesting to see the discussions and how they evolved. + Let me introduce myself. So I've been in academia for more than 20 years. It's been quite some time. I mainly do + research on Web accessibility. And recently, actually, the last five years, my research mainly focuses on using AI + to actually improve accessibility for disabled users. I do research in eye tracking and human computer interaction + as well, so we also try to use, for example, AI in, let's say, for eye tracking research and how we can actually use + AI for eye tracking. +
++ In the recent research that Matthew mentioned, we've been looking at something we already discussed in the previous session, especially the importance of WCAG EM, the evaluation methodology. It is a great resource for systematically evaluating websites; in this case, I am using the definition of websites broadly. But there are a lot of subjective elements. I guess Paul mentioned it in the previous session: even if we take one website, two auditors can come to different conclusions. One of the reasons for this is that WCAG EM has different stages, and one of the stages, for example, is defining the evaluation scope, what you consider, for example, as a website. Then exploring the target website, so deciding, for example, which pages you need to sample from the site, which pages to consider. That becomes a complex and subjective task. And we actually propose in our research a parallel methodology, we call it "Optimal EM," where we try to explore mainly machine learning approaches for doing a bit more systematic sampling. In the chat, I added the two recent papers that we published on this.
+So what we try to do, first of all, is establish a population for a website, what the population is, because you need to decide, for example, what pages are there, which ones are used, which ones are not used, et cetera. And then we try to cluster the pages that are available on the site by using unsupervised approaches, mainly based on statistical techniques, and we try to generate a representative sample. But of course, generating a representative sample for a site is not that straightforward, because you need to consider, for example: do we actually have enough coverage? Do we cover different pages? Do we cover, for example, the freshness of the pages? Do we cover the variety of complexity of the pages, et cetera?
++ So we also introduce, for example, different metrics that can be used to assess whether you are doing good sampling or not. This is basically trying to use unsupervised learning approaches to do the sampling, to help choose, to guide, what kind of pages you take from a site. And then you sample and you do the evaluation. In my research, I am also quite interested in complex structures. For example, tables are complex structures: how do we evaluate the accessibility of those complex structures? Because those kinds of complex structures are used not just for representing data, but also for visually laying out the content of the page. So we also try to use supervised approaches, algorithms that look at data and learn to differentiate, for example, whether tables are used for layout or for structuring purposes.
++ In general, just to set out the overview, these are examples from my research. But I believe AI can actually help in two ways. AI is not going to be a silver bullet, of course; it's not going to solve all the problems. Matthew mentioned, for example, that 30% of the issues can already be automatically identified. But of course, with the remaining 70%, if AI can help us and automate certain processes, that would be great. So it can actually be useful in two ways: testing, and also helping the authors, guiding the authors, or maybe we can call it repairing the accessibility issues. For testing purposes, I see certain areas with potential. For example, language models can be used to assess the complexity of the text or the layout. We can also assess, for example, whether generated alternative text is appropriate or not for certain kinds of elements; that can also be an area where automation can be done. Images, whether they are used for decorative or for semantic purposes: again, AI can help there in differentiating them. Page elements: I've been doing research on that for a long time. It's a complex task to take a page and decide what the page elements are and what their roles are. But of course, machine learning can also help there.
++ But there are also AI approaches that can help at the authoring stage, in my opinion. For example, generating alt text: we see that there is a lot of research on that, especially in image recognition and machine vision. Or automating caption generation, which can also help. Or text translation, because we see that multiple languages can be an issue, so automated text translation: AI models can also be useful there. And these kinds of examples, I guess we will discuss them. But besides the evaluation and the support for authoring, there are also related issues with these approaches that we have to be careful about; Matthew already mentioned some. For example, these algorithms rely on a lot of data, and on good quality data, so it's critical that we have data, and good quality data. It's also important that we avoid bias. For example, we should not have a bias towards certain user groups or disabilities; we should not exclude users, so the ethical dimension is critical there. And also, the accuracy and reliability of the approaches that I mentioned are critical: how successful they are, or how accurately they can actually help us. Of course, they cannot solve, let's say, the full problem, but they can at least assist, help, and guide in the process. So these are the issues that I wanted to mention as the related issues.
+Matthew, I think that's all I wanted to say. I guess we'll discuss them later again.
++ MATTHEW ATKINSON: Yes, yeah, lots to discuss, lots of common threads. Thank you for that, Yeliz. + And now over to Alain for your introduction. +
++ ALAIN VAGNER: Thank you, Matthew. Yes, so I will just briefly present myself. I am an accessibility specialist at the Information and Press Service of the Government in Luxembourg; it's a small country in Europe. I am also a member of the committee developing the European norm, via CEN and CENELEC. My background is that I worked in the field of human computer interaction for several years, and I have also been a software engineer and product manager. At the Information and Press Service of the Luxembourgish Government, I am part of a small team in charge of several topics, like administrative transparency, open data, freedom of information, and also digital accessibility. And more precisely, we are the organization in charge of monitoring the accessibility of public sector websites and mobile applications, in the framework of the European Web Accessibility Directive.
++ So there are similar organizations doing the same job all across Europe, in all EU Member States. And we are also in charge of the awareness and training of public servants on digital accessibility, and we monitor complaints coming from end users. And for each complaint, we act as a mediator between the end users and the public administrations. Regarding the monitoring, we are carrying out more than 100 audits per year. It may seem few, but we are also a small country, so that's why. And all our audit reports and all the data we produce during this monitoring are published online, under an open license, on the National Open Data Network, and they may be used, for example, to train an AI model. So I don't know if this is quality data, but it is some kind of readable data for sure.
+I also wanted to mention that I am not an AI specialist, but I am interested in the topic and in all tools and technologies which could help us improve the performance and execution of our audits. That's one thing. And also, I wanted to mention that, personally, I am quite aligned with Yeliz when she said that AI is maybe not a silver bullet. I don't think that AI can solve all accessibility issues, but we must find the right tool for the right problem. That was it for me. Thanks.
++ MATTHEW ATKINSON: Thank you very much, Alain. So lots of stuff to talk about. + First of all, one of the things that we just discussed on the risks side of things was bias and avoiding or trying + to avoid excluding certain user groups. And Alain, you actually have a really good example of this, involving + language because of population size. So would you like to tell us about that? +
+ALAIN VAGNER: Yes, so regarding the languages, Luxembourg is a very small country, but we have several languages here. The national languages are German, French, and Luxembourgish, which is a separate language. In the population, 75% of the people speak more than one language at work, and 50% speak between two and three languages at work. So the multilingual aspect in Luxembourg is very important, and it means that this is also reflected on our websites. All the official websites from the public sector need to be in multiple languages. And we also have lots of issues with mixed languages on the websites. Because people are really used to speaking multiple languages, it's not uncommon to see, for example, some chunk of text where the language is different from the main language of the website. And this is really common. But this needs to be appropriately tagged with the language attribute, for example in HTML, so that the reading from screen readers will be correct, with the right speech synthesis. That's the first one. We also have some issues with videos. We are trying to have subtitles and transcripts for all our videos, and there's no automatic captioning available for small languages. We have 400,000 speakers of Luxembourgish, and the big platforms, big tech, are not really supporting these small languages. It means that if you have, for example, a video in French or in German, you will have automatic subtitles on YouTube, for example, but if somebody is speaking Luxembourgish, or worse, if somebody is speaking multiple languages in the same video, then you are on your own, and you have to subtitle it yourself. So this can be more costly to produce. And here we also have some projects on this topic, like a project related to a speech to text engine and a tool for the automatic transcription of videos. These are ongoing projects using AI; we are not there yet, but we are working in this direction. This is one point regarding the languages.
+And another point is the complexity of the languages. Because if you are in a multilingual context, you cannot assume that everyone is totally fluent in all the official languages. And this also has an impact on accessibility because, for the Deaf community, as you know, people who are born deaf have more problems acquiring languages, and maintaining context is also an issue. So we should also work on easy to read documents, easy to read pages, so that they can help people with cognitive disabilities but also the Deaf community. And on our side, the Deaf community is mainly German speaking, so we are working mainly on the (speaking native language), which means easy to read pages, on our websites.
+MATTHEW ATKINSON: Thank you very much. I think there are some really good real world examples there of the implications of the sizes of data sets and those kinds of issues. And the example of captions has come up quite a bit. It's a good example because it allows us to introduce the concept of: at which time do we use a machine learning or AI kind of approach? And in that captioning example, although it's not directly related to evaluation, and we will bring it back to that shortly, the captioning example shows us that at one time, authoring time, helping somebody, a person, make the captions could really speed them up. Now, of course, right now, we are benefitting from human captioners, which is the best you can get and is fantastic. But not everybody is able to support that. So authoring time allows a human the option of correcting the mistakes that they know are there. Runtime does not. So that's a difference in implications because of the time at which you employ the tool.
++ And talking about accessibility evaluation, doing things at sort of sampling time as opposed to audit time, perhaps, + may be very similar implications there. The statistical models for looking at large sites, speaking as somebody who + has had to do sampling in the past, I would really appreciate being guided by those tools. I would, perhaps, be less + confident in machine learning's ability to pick up, certainly, all the issues, maybe even a fraction of the issues, + in the real sort of accessibility testing side of things for reasons that Yeliz has mentioned and also was discussed + previously about the issue of context. + So again, guidance, supporting scalability by having the tool guide the human and using it as a tool, more on the + authoring time end of the spectrum rather than the runtime end of the spectrum, in my view, at least, could result + in more reliable and, therefore, fair usage. +
+So Yeliz, you already introduced us to Optimal-EM to some degree, and you also talked about ways that the tools + could be used, machine learning could be used. For example, at authoring time to provide alt text. Could you tell us + anything about this issue of context? And I think you touched upon it with the tables where the machine learning + system has to do some interpretation and what sort of risks might arise from that and where there might be some + opportunities.
++ YELIZ YESILADA: Of course, identifying context is a big challenge, I think. For an evaluator, it's also a big challenge, considering different contexts for the evaluation. But for certain complex structures, by having, let's say, relevant data, certain algorithms can be generated to guide the authoring stage, as you mentioned, Matthew. Of course, these are all intertwined, the authoring and the evaluation, because if things are corrected at the authoring stage, then it's going to be easier to do the evaluation, easier to test them at the evaluation stage. But if, for example, while the author is authoring and generating certain structures, the AI can help there to identify that a certain structure is used, for example, not for presenting data but for laying things out, and that it should not have been used that way because it's causing problems for screen reader users, that would actually be a great help, as you mentioned, at the authoring stage. But identifying the context is a big challenge, and of course it will also be algorithmically challenging for AI algorithms, I think. So it's not going to be a straightforward issue.
++ MATTHEW ATKINSON: Indeed. So shifting gears slightly, I'm not sure if we've had any questions. There are certain other topics we'd like to consider. And just a note that you will see me using an assistive technology here, which is called the Manual Zoom, so that I can check for questions every so often. But one of the things that might be useful is, Alain, you had set out a series of requirements that you would have for considering using AI or machine learning technology. So would you like to tell us a bit more about those?
++ ALAIN VAGNER: Yes, no problem. So, as a public sector organization, we have, of course, a few requirements regarding AI. I would say the first one is transparency, because in the public sector we need transparency. And for AI tools, we need to know, for example, how it has been trained and where the data is coming from, because it will also help us with the questions regarding biases, for example. Biases are a frequent issue in the AI field, and we absolutely want to avoid this: for example, being more precise, more performant, on one type of disability and less on another. This we would absolutely like to avoid. And, for example, if we had some kind of AI trained on all our reports, we could maybe find some issues automatically. But for the edge cases, where we have less training data, we would have less precision. And these edge cases, more often than not, are the issues where we spend a lot of time as auditors. So this is something that may be a bit tricky.
++ I would also like to mention accountability, because we need to be able to explain a decision; how can we do that if we have just a black box, for example? So this may be an issue with some models. Related to this is also the concept that an AI or an algorithm cannot alone be made accountable for a mistake. So we cannot use AI to exonerate us from our responsibilities towards persons with disabilities. There were also the questions about metrics, and I think Yeliz already mentioned this a little bit. For us, we would like to know how to evaluate the efficiency of an automated tool, an AI tool. Two basic metrics I see are the detection rate and the false positive rate; these are the two which are really important for us. The tool should be able to detect an issue if there is one, and also avoid saying there is an issue if there is none.
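+As a small aside, the two metrics Alain names can be computed from counts obtained by comparing a tool's findings against an expert audit treated as ground truth. A minimal Kotlin sketch follows; the numbers are made up purely for illustration.
+
+```kotlin
+// Counts from comparing tool output against a manual audit (ground truth).
+data class EvaluationCounts(
+    val truePositives: Int,   // real issues the tool flagged
+    val falseNegatives: Int,  // real issues the tool missed
+    val falsePositives: Int,  // non-issues the tool flagged
+    val trueNegatives: Int    // non-issues the tool correctly ignored
+)
+
+// Detection rate: share of real issues the tool actually found.
+fun detectionRate(c: EvaluationCounts): Double =
+    c.truePositives.toDouble() / (c.truePositives + c.falseNegatives)
+
+// False positive rate: share of non-issues the tool wrongly reported.
+fun falsePositiveRate(c: EvaluationCounts): Double =
+    c.falsePositives.toDouble() / (c.falsePositives + c.trueNegatives)
+
+fun main() {
+    val counts = EvaluationCounts(truePositives = 24, falseNegatives = 56, falsePositives = 6, trueNegatives = 114)
+    println("Detection rate: %.0f%%".format(detectionRate(counts) * 100))          // 30%
+    println("False positive rate: %.0f%%".format(falsePositiveRate(counts) * 100)) // 5%
+}
+```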
+So yes, that's it, I would say. And more globally, maybe at a more abstract or political level, when introducing new AI tools, we should avoid the risk of the disability dongle, a concept introduced by Liz Jackson. It means that, from time to time, we encounter AI technologies that have been developed without including people with disabilities, and they don't really answer the needs of people with disabilities. So this should also, to my mind, be included in our requirements.
++ MATTHEW ATKINSON: Yes. On that point specifically, I am not actually sure if this is in the principles, the Ethical Machine Learning Principles, but one of the things that was discussed around the development of those, and they are still in development, like most W3C things, feedback is welcome, was the idea that when a system makes a decision about a particular user or a constituency of users, those users need to be involved in the design of the system if it's going to be making decisions about them. And that feels to me like a related thing. And you mentioned metrics, false positives and detection rates. And Yeliz was talking earlier about the work with Optimal-EM and getting to the stage where it could do unsupervised work. Could you say a bit more about false positives and detection rates that you've come across in research, Yeliz?
++ YELIZ YESILADA: Do you mean the metrics that are available, or metrics in general for the sampling work? Because for the sampling work, with WCAG EM, we actually realized in our research that we don't really have metrics to decide. For example, WCAG EM says you should explore the site and pick certain pages that represent the functionality. But these are really subjective definitions, because you can pick a page for a functionality of the website, let's say, but it is very outdated; so does that mean you covered the functionality or not? So in our work, we try to come up with metrics that can really assess whether you are doing good sampling or bad sampling. The metrics that we introduced include, for example, coverage. Let's say you pick certain pages; how much of the whole site are you covering? What's the population that you are covering? In fact, we draw similarities with the census work that governments are doing. For example, if you have a population and you want to do a survey of that population, you need to make sure the survey is done with a sample that is representative and has full coverage of the population. So we are trying to use those kinds of metrics. And besides coverage and representativeness, we also introduced, for example, the idea of freshness. If you are going to sample pages, your pages should be fresh pages, pages that people are using. Let me give you an example from the period of COVID 19. In that period, certain pages that were related to COVID 19 were very critical for the population. So if an auditor is picking pages, let's say they are auditing a site but they are not including those pages, they are missing critical, fresh pages that lots of people visit. So we also introduced freshness. We introduced complexity, for example, because when auditors pick pages, they might pick pages that are simple to evaluate and avoid the complex ones. Of course, the question there is: what do we mean by complexity? Because a page can be technically complex or visually complex, so you can have different kinds of definitions for complexity. But we think that for sampling, that should also be a criterion: when you are picking pages for evaluation, you should not just pick pages that are easy to evaluate, let's say, technically, but should really think of the recent technologies that are used, you know, dynamic content. We know that they are challenging to evaluate. So do they include dynamic content? That's another metric we consider.
++ Based on these metrics, what we try to do in our work is, let's say, generate the population of the site. We also explore how you generate the population of the site. For example, you can crawl the site automatically and find out all the pages, which we know is not always possible; technically it is very difficult. Or you can also look at, for example, the server side logs; the server side logs can also be used to generate a population. And we use these metrics to compare different ways of clustering, using machine learning approaches to cluster the pages. You can cluster them, for example, based on complexity, based on structural similarity, or based on freshness. And then what you do is sample from the different clusters to make sure that you are actually covering a representative sample from the site. Of course, here we are focusing on websites. But in the previous session, there was a very nice discussion about what we consider, what we sample, from a mobile application. That should, of course, be considered: for example, different screens, different layouts, different pages generated, et cetera. So there are lots of questions that need to be answered, of course, from a research perspective.
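+To give a feel for the kind of cluster-then-sample pipeline described here, a minimal Kotlin sketch follows. It is not the authors' Optimal-EM implementation: the page features, the tiny k-means, and the rule of taking the most complex page from each cluster are illustrative simplifications (no feature normalization, no handling of empty clusters).
+
+```kotlin
+import kotlin.math.sqrt
+import kotlin.random.Random
+
+// A page described by a few numeric features, e.g. element count,
+// days since last update, and amount of dynamic content.
+data class Page(val url: String, val features: DoubleArray)
+
+fun distance(a: DoubleArray, b: DoubleArray): Double =
+    sqrt(a.indices.sumOf { (a[it] - b[it]) * (a[it] - b[it]) })
+
+// Very small k-means: assign pages to the nearest centroid, recompute
+// centroids, repeat. Returns the clusters of pages.
+fun kMeans(pages: List<Page>, k: Int, iterations: Int = 20, rng: Random = Random(42)): List<List<Page>> {
+    var centroids = pages.shuffled(rng).take(k).map { it.features.copyOf() }
+    var clusters: List<List<Page>> = emptyList()
+    repeat(iterations) {
+        clusters = pages.groupBy { page ->
+            centroids.indices.minByOrNull { distance(page.features, centroids[it]) }!!
+        }.toSortedMap().values.toList()
+        centroids = clusters.map { cluster ->
+            DoubleArray(cluster[0].features.size) { i -> cluster.map { it.features[i] }.average() }
+        }
+    }
+    return clusters
+}
+
+// One page per cluster, preferring the "heavier" (more complex) page so the
+// audit sample is not biased towards pages that are easy to evaluate.
+fun sampleForAudit(pages: List<Page>, sampleSize: Int): List<Page> =
+    kMeans(pages, sampleSize).map { cluster -> cluster.maxByOrNull { it.features.sum() }!! }
+
+fun main() {
+    val pages = listOf(
+        Page("/home", doubleArrayOf(120.0, 1.0, 8.0)),
+        Page("/news/covid-19", doubleArrayOf(80.0, 0.0, 3.0)),
+        Page("/contact", doubleArrayOf(30.0, 200.0, 0.0)),
+        Page("/search", doubleArrayOf(60.0, 5.0, 12.0))
+    )
+    sampleForAudit(pages, sampleSize = 2).forEach { println(it.url) }
+}
+```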
+MATTHEW ATKINSON: Indeed, yeah.
+YELIZ YESILADA: I hope that answered your question about the metrics for sampling.
+ ++ MATTHEW ATKINSON: Yeah, that was very helpful. That was very helpful indeed. + So from my experience of doing accessibility audits, it is difficult to come up with a good sample. There is both + science and art to it. And we would often go for looking at the things that a user could do with the site or the + application and trying to cover as many of those different things as we could within the budget of the size of + sample that we had. And we would generally go for the more complicated theming pages so that we were making sure + that we would cover as much as possible. In some ways, it's easier if you have a smaller number of samples that you + have to get because you can pick stuff that's obviously different. It gets harder if it's a bigger site and a bigger + volume of pages to be audited because you want to make sure that each one is counting for something and not just + repeating stuff. And machines are very good at spotting patterns. So as I have said before, I would have been + interested in having some guidance, even though, as you've discussed, you know, in your answer there, it turns out + that counting things is one of the hardest problems there is. Just counting how many things we've got is incredibly + difficult. + +
+ So we actually had a question earlier, and I am just trying to see if we've got any additional ones now. Somebody asked earlier whether we actually need accessibility guidelines so much anymore if AI is going to be building the websites. I had a couple of perhaps not fully formed thoughts on that. Even if AI was building the websites, and even if a different AI was measuring them, and for my part I don't think that's going to be 100% of the case in future, I think it's a tool, but even if that was the case, we would still need accessibility guidelines in order to make sure that the site was being built to a particular standard and passed a particular standard in terms of requirements, so that it would be expected to be accessible. So I think there's still a need for accessibility guidelines. And in a way, my answer to that question probably says more about my perspective, which is that we are building these things for people, and that means to me that people really are best placed to be involved in making the creative decisions around both the building of it and the creative or subjective decisions in the testing of it. It remains to be seen how effective machine learning can be as a tool, but there are definitely certain things that seem like exciting avenues for exploration. So that's my thought on that particular question. But I'd like to turn it over to either or both of you to see what you think about it. And apologies for doing this suddenly; hopefully my rambling has given you time to think about it.
+YELIZ YESILADA: Matthew, I just want to add there that I think we need guidelines, in one form or another. Because what I also see in the application of AI is that we really need expertise. We need experts, people who have a good understanding of the requirements of people with disabilities, such that they can encode it into algorithms. You know? So when we say "AI," of course these AI algorithms have to be developed, they have to be put into action, they have to generate the models. And in order to generate models, we need experts that understand the requirements of disabled people and disabled users. That understanding of the requirements is encoded in the guidelines. Whether you call them guidelines or requirement documents, in one form or another we will need them, because we need people to have a good understanding of what is needed, I think. I didn't mention it at the beginning, but I also see this as one of the challenges for AI advancement. We need people who are good at algorithm development and at applying it, you know, generating models, et cetera, but we also need people with a good understanding of accessibility requirements. I think these guidelines or "requirement documents" are an excellent place for communicating these kinds of requirements so they can be automated or modeled in one form or another.
+ MATTHEW ATKINSON: Yeah, and to me this is a continuation of the well-known principle that if you really want to find out how accessible your site is, get some people who are facing accessibility barriers to test it. Somebody like me can come along and tell you where the areas of potential risk are and suggest technological solutions, at least in my previous role. And that's all very well and good. And I do have some lived experience of disability; I have a vision impairment. But the best people to tell you are the people that are going to be using your site. So it's always, always the best idea to get input from real people using your products and services as often as you possibly can. So, just coming back to that question: do we need accessibility guidelines, Alain?
+ ALAIN VAGNER: Yes, so I think it is really needed. I just wanted to add something that is probably less interesting for most of you, but it's interesting for me: the legal part of it. For us, for all the public sector websites, it's in the law, so the websites should be compliant with the guidelines. If there are no guidelines, we will have an issue. We somehow need a scale. We need to be able to compare. We need to be able to say whether a website is compliant or not. And this cannot be done without any guidelines. And this is also important for business, because you know the European directives often have an economic impact. And one of the aspects of the Web Accessibility Directive was also to develop a uniform market for accessibility in Europe. So we need these guidelines to have this uniform market.
+MATTHEW ATKINSON: Excellent. So thank you very much for that perspective. We do have some questions that have come in. One of the ones I briefly wanted to come back to is the general question that we got about whether AI could be trained to evaluate accessibility. I think we've all said that there are possibilities here, but there are challenges. One of the things that was mentioned was this Europe-wide monitoring exercise. And Alain, you mentioned, who knows, maybe some of the data from that could be used to train AI. I am just wondering, Alain and then Yeliz, what your thoughts on that are, and then we can go to some of the questions that we've got in the queue.
+ ALAIN VAGNER: Yeah, so I would say I think it should be possible, but the data would probably need to be of good quality. This is something Yeliz already mentioned, and we didn't think about it when we produced our report. So I would say, for now, maybe we should also talk with AI specialists who could tell us what they need as input to be able to train their models. But I think there are some opportunities, and there are also some pretrained models. I don't know if this is really, totally answering your question. But for example, we have lots of problems, as I said, linked to languages, and there are some pretrained models that could help us a lot with the mixed-language issues in our pages. So I think this is something where the models are already there, more or less. Maybe we need to refine them for some of the languages we use here that, unfortunately, may not be globally available, but yeah, this is one point. For the rest, I would say that's it for me. Thank you.
+MATTHEW ATKINSON: Okay. Any additional thoughts on that, Yeliz?
+ YELIZ YESILADA: I just want to say, as I said at the beginning, AI is not a silver bullet. It's not going to solve the full problem, in my opinion, in the near future. We need a lot of development in the area. But of course, there are certain areas we mentioned where it can really help. I think we already mentioned them, so I don't need to repeat. But there are certain things where AI models can be generated to help out with the full process of evaluation, I think. Matthew, I hope I answered.
++ MATTHEW ATKINSON: Super from my perspective. + So there's one question here that I feel like I can answer just myself, although I will invite you guys to chime in. + And it's a good question, as they all are. Are there any accessibility guidelines or standards that we can use to + evaluate AI or machine learning interfaces such as chat bots or ChatGPT? +
+ From my perspective, the answer is yes. It's WCAG. It's the existing standards. These interfaces are presented via the Web, and so you can apply WCAG to them. Slightly more specifically, and on a little bit of a tangent, there is a growing resurgence of command line user interfaces, especially for developer tooling. For command line interfaces that actually operate on a machine natively, you can't apply the whole of WCAG, but there is work at W3C that tells you which bits you can apply. Just as we've talked about in other areas, WCAG is being applied in a range of different contexts. But whilst these chat bot interfaces might look very conversational, and almost like a command line interface in some ways, they very much are, to my knowledge at least, being presented as Web apps, and therefore I would say that WCAG is the best set of guidelines for that. If either of you have any additions to that or differences of opinion on it, I'll just give you a couple of seconds to say so, and then I will try and pick one of these other questions because we've got a few.
+YELIZ YESILADA: I agree with you, Matthew, so I have nothing to add.
+ALAIN VAGNER: Same here.
+ MATTHEW ATKINSON: Okay. So I see a couple of questions along the lines of: could we say that AI will never be able to complete the last kilometer on its own whilst doing accessibility testing or remediation? And I think at the moment we are all saying pretty much that. We've talked about that a little bit, but it's a nice way of phrasing it. There's a question here that says: we know that AI is helping with meeting, and perhaps even evaluating, compliance. Do we know of any examples where AI has broken things that were working well? Would either of you like to talk about that?
+YELIZ YESILADA: I can add there. I think we mentioned it already; sorry, I jumped in, I think. I just wanted to say that, of course, in AI algorithms accuracy is very important. If the accuracy of the models is not high, that means they will not be able to handle certain things and they will make wrong decisions. So we can see that they can actually break things. We see in, like, caption generation or alternative text generation that at certain times the models are not able to generate automated captions or automated alternative text properly. That's just what I wanted to say.
+ALAIN VAGNER: I have maybe another example in the same vein, so it's the same idea. I have recently been doing lots of tests of some tools for PDF remediation. We have lots of problems on the public sector websites: there are, let's say, lots of PDF documents available on the websites, and they are totally not accessible. We have done some statistics, and on the 20 biggest websites in Luxembourg, approximately 60% are not accessible, which is really big. And on this, some of the organizations asked us: we have tons of PDFs, how will we be able to remediate them? And there are some AI tools, so they were testing them, and we also tested them. We have seen that AI is mainly involved in the auto tagging. Tags are, in fact, metadata in PDF documents that are used to express the structure of the document for assistive technologies, in particular for blind people, for example. And this auto tagging using AI is a bit better than auto tagging based on heuristics, but it's still not there. I have seen some companies announcing that their AI is more performant than manual tagging, but from my experience, that's not the case. I would be interested in seeing independent tests on this. Independent tests would be really helpful to see to what degree these tools are able to automatically tag documents.
+Among the issues we have seen, there were some things like you mentioned before, the complex problems: the detection of headings in tables, et cetera; the detection of artifacts, what is decoration and what is not; wrong reading of complex layouts, so when you have complex layouts on pages, the reading is often not really good; and also the detection of some patterns. In documents you have some special patterns, for example a table of contents. The table of contents is a special kind of pattern, and it should be detected by this AI. So these were a little bit the points where the one or two AIs I have tested were not able to detect everything. But I think there is some, yeah, room for improvement there, of course.
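+ [Illustrative aside, not part of the panel: one very small automated check related to the auto-tagging discussion is simply whether a PDF declares any tag structure at all. The sketch below assumes the pikepdf library; it only flags completely untagged documents and says nothing about whether existing tags are correct, which is the hard part Alain describes.]

```python
# Minimal sketch: report whether each PDF exposes a structure tree (tags) at all.
# Presence of /StructTreeRoot in the document catalog only means the PDF is tagged;
# it does not verify headings, tables, reading order, or artifact marking.
import sys
import pikepdf

def is_tagged(path: str) -> bool:
    with pikepdf.open(path) as pdf:
        return "/StructTreeRoot" in pdf.Root  # pdf.Root is the document catalog

if __name__ == "__main__":
    for pdf_path in sys.argv[1:]:
        print(f"{pdf_path}: {'tagged' if is_tagged(pdf_path) else 'untagged'}")
```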
+ MATTHEW ATKINSON: Okay. We've got three minutes left. I have seen one other question, which I wish I had seen earlier, but I will do my best to just set out some general ideas about it. This is a complicated subject, so wish me luck. So somebody's asking: could knowledge of overlay tools be usefully used for technical monitoring?
+ And I think it's important to introduce people to what the concept of an overlay is. At its most basic level, it's this: imagine your organization has a site, and an overlay is effectively third-party code that you import and run on the client side in the browser, and its goal is to detect accessibility problems and to fix them. As you can see from our discussion, our view is that there is potential for some things to be done with machine learning, and there's still a long way to go with a lot of other things to do with machine learning. So there are differences of opinion in the industry as to the efficacy of these tools. But as you have seen from our discussion, you know, there's openness to exploring things. But with overlays being run, if they were on many sites and they had the opportunity to see many sites, I think the question is: can that add up to some useful monitoring experience for us? I am not sure that there would be enough verified data to start forming a model from that. But very quickly, I am just wondering if either of you has a few thoughts on that. I think it's going to have to go to just one of you because we've only got a minute left, so I apologize for that. But if either of you has got any extra thoughts on that to add to mine, please do.
+ALAIN VAGNER: It's a good question. Yeah, it's difficult to say. From our experience, these tools can be interesting for some fixes, but we should not rely only on them. It could be, for example, a first step. We have done something on our website, we have included a bit of this, but it is not the end of the road. There is still some stuff that should be done on the side of the authors, on the technical side of the website. As we have said, you cannot automatically detect all the accessibility issues, and if you cannot detect them, then you cannot fix them. So there is always still room for manual testing and manual evaluation and, yeah, improvements to the accessibility of websites.
+YELIZ YESILADA: I agree with Alain. Matthew, I just wanted to add, and I think we already mentioned it: as with the AI algorithms, we have to approach these overlays carefully as well. You know, especially regarding their accuracy, reliability, and transparency. So we have to, I think, approach them carefully, as with the AI models. Rather than aiming to replace the evaluator or the author and fix the problem, we can actually use them in, like, a supportive role, making sure that it is checked afterwards whether they are doing the right thing or not, based on the reliability, accuracy, and transparency, as I mentioned.
+MATTHEW ATKINSON: Cool. And we have to wrap it up there, and I apologize for us going over by a + minute. I will + just say again thank you to everyone, and especially Alain and Yeliz. And if you want to discuss issues like + particularly like the last question, I think the Accessibility at the Edge Community Group would be a good place to + do it because we are trying to get consensus in the industry on issues just like this. And also please check out the + Ethical Machine Learning Principles. Thank you very much. Back over to Carlos.
+CARLOS DUARTE: Thank you so much, Matthew, Yeliz, and Alain, for another wonderful panel. I think we have been really lucky with the three panels today. It was an excellent experience, I guess, with three different perspectives or three different topics on how to increase the digital accessibility of resources.
+So once again, just to conclude, many thanks to all our participants today: Jade, Sarah, Audrey; Detlev, Andre, Paul; and Matthew, Yeliz, and Alain. It was a privilege to attend this session, and many thanks also to the interpreters and the captioner for their excellent work. As I said at the beginning, this is the final symposium of the WAI-CooP Project, but hopefully this trend will not end here, and W3C can pick up the organization of these symposia and continue them in the future. So thank you so much to all panelists, thank you so much to all attendees, and we look forward to future developments in Web accessibility. Bye bye.
+ + + + \ No newline at end of file diff --git a/pages/about/projects/wai-core-2015/index.md b/pages/about/projects/wai-core-2015/index.md new file mode 100644 index 00000000000..4b57a47b87d --- /dev/null +++ b/pages/about/projects/wai-core-2015/index.md @@ -0,0 +1,69 @@ +--- +title: "WAI-Core Project" +nav_title: "WAI-Core 2015" +lang: en + +permalink: /about/projects/wai-core-2015/ +ref: /about/projects/wai-core-2015/ + +feedbackmail: wai@w3.org +--- + +{::nomarkdown} +{% include_cached box.html type="start" h="2" title="Introduction" class="full" %} +{:/} + +This page provides information on the Web Accessibility Initiative - Core **(WAI-Core) Project** sponsored by the National Institute on Disability, Independent Living, and Rehabilitation Research ([NIDILLR](http://www.acl.gov/programs/NIDILRR/)), US Department of Health and Human Services ([HHS](http://www.hhs.gov/)). + +{::nomarkdown} +{% include_cached box.html type="end" %} + +{% include toc.html type="start" title="Page Contents" %} +{:/} + +{::options toc_levels="2" /} + +- The TOC will replace this text. +{:toc} + +{::nomarkdown} +{% include toc.html type="end" %} +{:/} + +## About WAI-Core + +The WAI-Core Project, funded by NIDILRR at the US HHS, provides support for accessibility work at W3C WAI to: + +Task 1: + +* Provide staff support and technical expertise within a Working Group of Web accessibility experts representing different stakeholder groups, including via recruitment of public comments on draft technical reports from other W3C Working Groups, W3C Interest Groups and W3C Community Groups, and as needed to develop technical specifications that support accessibility, such as WAI-ARIA and Accessibility Application Programming Interface (API) mappings; +* Update documentation as needed of accessibility experts’ guidance on accessibility requirements, specification design considerations, and known gaps, drawing on experience gained from reviewing resolutions to accessibility barriers in a wide range of Web technologies, to serve as a general introduction to accessibility self-review by other W3C working groups and to facilitate the scaling of accessibility expertise to a larger number of W3C specification reviews (currently Web Technology Accessibility Guidelines (WTAG)). + +Task 2: + +* Maintain one or more multi-stakeholder Working Groups of Web accessibility experts to develop updated guidance and technical support materials on Web content and applications, authoring tools, browsers, mobile applications, and other Web-enabled applications, devices and services; This may be in stand-alone, combined or modular format, including extensions of previous guidelines, or as updated combinations of previous guidelines. It may include accessibility guidance and technical support materials for technologies relating to industries newly converging with the Web, for example mobile, digital publishing, Web of things, TV and Web, education, health care, or other areas; and may include extensive guidance and/or techniques for areas such as cognitive and learning disability, low-vision accessibility, and/or voice input accessibility. 
+ +Task 3: + +* Review, provide analysis of, and comment on understandability, readability of and usability of accessibility standards and implementation reference materials; develop non-technical introductions to accessibility guidelines and standards, and overviews of accessibility user requirements for people with disabilities; provide education and outreach activities to support acceptance of Web accessibility guidance; comment on draft standards, regulations, and/or policies where necessary to promote acceptance of Web accessibility solutions. + +## How to Participate + +WAI-Core work is developed through [W3C/WAI Working Groups](http://www.w3.org/WAI/groups). Working Groups that are currently relevant to WAI-Core work, and links to information on participation, include: + +* [Accessible Platform Architectures (APA) Working Group](/about/groups/apawg/) ([participation in APA WG](/about/groups/apawg/participate/)), for review of accessibility in W3C specifications; +* [Accessible Rich Internet Applications (ARIA) Working Group](/about/groups/ariawg/) ([participation in ARIA WG](/about/groups/ariawg/participate/)), for development of accessibility specifications as needed, such as ARIA, and Accessibility Application Programming Interface Mappings; +* [Accessibility Guidelines (WCAG) Working Group](/about/groups/agwg/) ([participation in AG WG](/about/groups/agwg/participate/)), for development of accessibility guidelines; +* [Education and Outreach Working Group (EOWG)](/about/groups/eowg/) ([participation in EOWG](/about/groups/eowg/participate/)) for improving understanding and usability of accessibility standards and implementation reference materials, developing non-technical introductions to accessibility guidelines and standards, and providing education and outreach to support implementation of Web accessibility guidance. + +In addition to participation opportunities, everyone is welcome to review drafts. Draft in progress are highlighted on the [WAI home page](http://www.w3.org/WAI/). To get notifications of drafts for review, see [Getting WAI Announcements](http://www.w3.org/WAI/about/announcements) for links to WAI tweets, RSS feed, and WAI Interest Group (WAI IG) emails. + +For more information on the WAI-Core Project and other WAI work, see [Getting Involved with WAI](http://www.w3.org/WAI/about-links.html) and [Participating in WAI](http://www.w3.org/WAI/participation). If you have any questions, contact [Shawn Lawton Henry](http://www.w3.org/People/Shawn/). 
+ +## Project Staff + +The following W3C staff are supported in part by WAI-Core funds: + +* [Shawn Lawton Henry](http://www.w3.org/People/Shawn/) +* Judy Brewer (through December 2022) +* Michael Cooper (through July 2023) diff --git a/pages/about/projects/wai-core-ford/index.md b/pages/about/projects/wai-core-ford/index.md new file mode 100644 index 00000000000..01ce395599e --- /dev/null +++ b/pages/about/projects/wai-core-ford/index.md @@ -0,0 +1,68 @@ +--- +title: "WAI-Core Ford" +nav_title: "WAI-Core Ford" +lang: en +last_updated: 2023-07-27 + +permalink: /about/projects/wai-core-ford/ +ref: /about/projects/wai-core-ford/ +feedbackmail: wai@w3.org +--- + +{::nomarkdown} +{% include_cached box.html type="start" h="2" title="Introduction" class="full" %} +{:/} + +This page provides information on the Web Accessibility Initiative WAI-Core Ford Project supported by the [Ford Foundation](https://www.fordfoundation.org/), [Technology and Society Program](https://www.fordfoundation.org/work/challenging-inequality/technology-and-society/). + +{::nomarkdown} +{% include_cached box.html type="end" %} +{% include toc.html type="start" title="Page Contents" %} +{:/} + +{::options toc_levels="2" /} + +- The TOC will replace this text. +{:toc} + + +{::nomarkdown} +{% include toc.html type="end" %} +{:/} + +## About the Project + +Current Ford Foundation funding seeks to strengthen WAI work overall including through the continual process of updating our technical guidance supporting accessibility for the Web community. In addition, we seek to continue our work to extend the benefits of WAI to people with disabilities in low resource regions. This includes work to expand access to the Web and mobile for people with disabilities in low resource regions, in order to increase their ability to participate in all aspects of the information society, through work that will enhance the overall quality of WAI work while also supporting the missions of W3C and MIT. + +## Main Areas of Work + +1. Improve understanding of needs of people with disabilities in low resource regions.Visual | +Audio | +
---|---|
[Shadi speaking] + Slide 1: About the Project |
+ Just a little bit about the project itself. WAI-Tools Project is an acronym for Advanced Decision Support Tools for Scalable Web Accessibility Assessments of the Web Accessibility Initiative. So, WAI-Tools. + So, it’s a project of the W3C Web Accessibility Initiative co-funded by the European Commission, the EC, under the Horizon 2020 program. It’s a so-called Innovation Action project. So there are different types of projects that the European Commission supports and this is one of them. It’s under the line of innovation action. + It started on the 1st of November in 2017. It also – it already feels like many decades ago. And unfortunately actually, coming to an end at the end of this month. As I mentioned, it’s been a really great project, great fun working with the partners. |
+
Slide 2: Project Partners | +Speaking of the partners of the project, the lead partner is the European Research Consortium for Informatics and Mathematics, ERCIM which is the European host for W3C. So, this is the legal footing of W3C in Europe. + Other partners in the project are Siteimprove from Denmark, the Accessibility Foundation in the Netherlands, the Norwegian Digitalisation Agency, Digdir, and the Administrative Modernization Agency in Portugal, AMA. The University of Lisbon, FCID, in Portugal, and Deque Research which is based in the Netherlands. + So, these are the project partners who together are responsible for moving this forward, this project, but really a lot of this work is happening or happened in – directly in open W3C groups and we’re going to be talking more about that, about how the project work has happened and how you can still contribute to some of this project work which is going to continue happening in these groups. |
+
Slide 3: Objective 1 | +But just to give you a little bit of the objectives of the project, what did we really want to achieve with the project? One of the main objectives here on the screen is an illustration where at the very top of the illustration is kind of the ultimate goal. We all want compatible results, right? + When we are evaluating for the Web Content Accessibility Guidelines which is the layer below, we’re evaluating for accessibility or testing for accessibility, we’re using different methodologies, so different methods. But at the end of the day, we’re testing for the same set of guidelines and we want to get compatible results. We want to get the same results when we test the same things, right, no matter how we actually test them. |
+
Same slide; highlight around "ACT Test Rules (open royalty free)" middle layer of the diagram | +And so, this was really the focus of the project, to look at what we call ACT test rules, Accessibility Conformance Testing rules. The point is these are very, very small procedures for testing that can be combined together for different methodologies. And methodologies here is a broad term. It could be automated tools or anything. Any procedure that you follow for manually checking, this is a methodology as well. So, really, anything that encodes how you check accessibility is a methodology. It could be manual. It could be automated. It could be a combination, semi-automated, so a combination of automated and non-automated approaches together. + So, if we have very small building blocks, and they are depicted here as these small boxes, these small squares, very, very small, like Lego pieces, we can combine these together to provide different methodologies but at the end of the day, if we test the same thing, we want to get the same result. + So, that’s really the objective or the vision of the project is to try to get consistency because we know that one of the big issues of accessibility right now is that we don’t have that consistency. We have different tools that provide different results or different evaluation methods. So, you go to one person and you get one result and then sometimes you can get slightly different results when you go to a different person to get it checked. That’s a big problem for accessibility that we’re trying to address to have more consistency of how we test. |
+
Slide 4: Objective 2 | +Another objective of the project, by defining these checks and finally agreeing on them and not having to constantly re-repeat how we test certain things, how do we test the text alternative or how do we test the label. If we can finally decide and agree on a common interpretation for these checks, maybe we can increase automation in that. + Right now, a very large part of accessibility needs to be tested manually. But we also know that there is a lot of advancements happening in the technology. Buzzwords, artificial intelligence or machine learning, natural language processing. All of these technologies are evolving, all of these innovations that would essentially allow us to increase the level of automation. + It doesn’t mean that accessibility will be fully automatable, at least in the foreseeable future, but at least we can maybe increase the amount of tool support which would be – would make things much more effective and much more efficient if we can increase this level of automation. |
+
Slide 5: Project Deliverables | +So, these are really the objectives or the core pieces of the project. The deliverables of the project, which we’re going to go into more detail, but hopefully from a more applied side, is the main part of the project, the heart piece is to develop Accessibility Conformance Testing rules, right. So, this is to create an initial set hopefully to get the work in the community kickstarted to help establish an active community that will sustain beyond the project. So, this is a call on – call for action for you to get involved in this work that will continue beyond the project. + We implemented these rules that we developed in different tools from project partners in the project, here specifically the partners Siteimprove in their checker called Alfa, in Deque in their checker called Axe-core, and in – a tool from the University of Lisbon in a checker called QualWeb. + And we also validated these rules in real-life situations. We had a set of websites that we took and that we ran these rules across. + Now, these rules were applied in different ways, were test-driven, or were taken up by the Portuguese and Norwegian observatories. These were part of the project to kind of really ground us and look at when entire countries want to do large scale monitoring of accessibility, what are their needs? What can we learn from that? And how can we provide them with different types of rules that will support getting them valuable information? + We looked at improving the existing WCAG-EM Report Tool. So, people might know that tool. It already existed. WCAG-EM stands for WCAG evaluation methodology. It actually has a longer name, Website Accessibility Conformance Evaluation Methodology. It’s a long name. But this is an evaluation methodology for WCAG that can be used to test entire websites. And it was a tool to help you walk through that and now we have added to that tool an import function so that you can import test results from automated tools, again here to show the idea that we can combine automated and manual when we’re using open standards here. + We also created an accessibility statements generator tool. So, this helps you create accessibility statements for your website once you have done the testing to explain to the world the accessibility that you have on your website. + And last but not least, a demo, a proof of concept to show, to demonstrate the potential of open linked data for accessibility. So, what if – right – what if we had these test results being published, at least those by public bodies, and we could connect these and we could scrape these and we could do a more active or decentralized monitoring approach? |
+
Slide 6: Project Results and Further Information | +So, these are the project deliverables. More information is on the website. We will provide these links again at the end, so you don't need to start looking at them now. The address is w3.org/WAI/Tools. That is the project page for the WAI-Tools Project, and you can find there further information, all the resources, all the deliverables, including this open meeting with the slides online as well. |
Visual | +Audio | +
---|---|
[Shadi speaking] + Title Slide: ACT Rules Development |
+ So, let’s move directly to our first session on the development of Accessibility Conformance Testing rules. I want to ask on the virtual stage, Wilco Fiers from Deque and Kasper Isager from Siteimprove and Carlos Duarte – Carlos Duarte from University of Lisbon. + If the three of you could just briefly – Wilco can you say a few words about yourself, just introduce yourself to the audience. |
+
[Wilco speaking] | +I sure can. Can you guys hear me alright? | +
[Shadi speaking] | +Yep. All fine. | +
[Wilco speaking] | +Fantastic. Alright, so my name is Wilco Fiers. I work for Deque Systems. I am a product owner and project manager. There, I am responsible for Axe-core which is Deque’s open source accessibility engine which Shadi mentioned. + In addition to that, I spend part of my time on W3C activities. I am the chair of the ACT Rules Community Group, a facilitator on the ACT Task Force, as well as a member of the Silver Task Force which is currently developing WCAG 3. |
+
[Shadi speaking] | +Thank you, Wilco. And all these groups that Wilco mentioned, the task force and community group and so on, will be explained in this presentation. | +
[Wilco speaking] | +Don’t worry about it. We’ll get it. We’ll get to it. |
[Shadi speaking] | +We’ll get there. Piece by piece. We love acronyms in W3C. | +
[Wilco speaking] | +I have too many hats also, to be fair. | +
[Shadi speaking] | +Kasper, please introduce yourself. | +
[Kasper speaking] | +Yes, I will make it short. I don’t have as many hats. My name is Kasper Isager and I work as the product owner of Alfa at Siteimprove. And then Alfa is our accessibility conformance test engine. | +
[Shadi speaking] | +Welcome. Well, you don’t need to be so humble. Kasper is a mastermind of a lot of the accessibility engines at Siteimprove as far as I know. | +
[Carlos speaking] | +Hi, I’m Carlos Duarte. I teach – well my main responsibility is to teach computer science at the University of Lisbon, although my research is focused a lot on web accessibility. And so, as part of my responsibilities in teaching computer science, I also teach web accessibility to my students and I also have some more hats. + Together with Wilco, I’m co-chairing the ACT Community Group and also another hat for the W3C where I do work for the Education and Outreach Working Group, specifically on preparing the new – the curricula, the new web accessibility curricula. |
+
[Shadi speaking] | +Thank you, Carlos. And Wilco, go ahead. Take it away please with the presentation. + And this time you are muted. |
+
[Wilco speaking] + Slide 2: Goals of ACT |
+ I should unmute. Do you mind going to the next slide? + Thank you very much. So, I just wanted to get into the goals of ACT really quick. Again, ACT stands for Accessibility Conformance Testing. The project wasn’t setup as part of WAI-Tools. It predates it by several years. Some of you may also know it as Auto-WCAG which is a group that later got renamed to the ACT Rules Group. + And the reason that group was started off is to improve consistency in accessibility testing. So, one of the greater challenges Shadi mentioned is consistent testing. You do want, when you get a report from somebody, for that report to be accurate and based on well-founded ideas. Otherwise, you might fix something and you have somebody else take a look at it and they find more things. Or they may find completely contradicting things which can be fairly frustrating. If you don’t have a clear definition of when your project is sufficiently accessible, that makes life pretty hard. + This kind of gets exacerbated when you look at something like European Union which is starting to track the accessibility of government websites across the whole of Europe. + If Denmark tests things in a different way than the Netherlands does, what you’ll end up with is with results that don’t – aren’t very comparable. Even worse, you might have a website that is relevant in multiple countries and if they used different testing methodologies to gauge whether or not you complied with the law – because that’s what we’re getting to at a certain point; it should be legally required to meet these standards – if they are testing along different methods, you may find that your website complies in one country but not in the other even though you’re testing along the same WCAG standard. + So, that’s really the issue and the problem isn’t too extreme. Like, it’s not like WCAG is all over the place and results go anywhere. But there are differences there and it kind of erodes the trust that you can get from results like that. + So, that is what ACT was setup to do. And our goal there is, as I mentioned, we started first off with Auto-WCAG which is really focused on doing this for automation. But then we broadened it out to address testing methodologies as well. And testing methodology is not a well-established term but if you ask around in accessibility institutions around the world, most of them have some sort of document that tells you how to interpret WCAG, like what you do with this particular element on a page and with that particular element on a page. + One of the great things about the kind of accessibility guidelines is that it was written in a way that it is technology agnostic so that it can live for a long time, so that it isn’t just applicable to one particular version of the web standards, but it should be applicable to all of them. But that also means there is some vagueness there that does require that interpretation. + So those interpretations, those are what we are trying to capture with the ACT Rules and so this is applicable really to lots and lots of organizations. + Next slide please. |
+
Slide 3: Benefits of ACT | +So, we’ve developed this rules format, the ACT Rules Format, and the benefits of that are fairly wide. So, like I mentioned, we’ve been working with this format for several years now. That is really the goal of this project, is to use the rules format and develop these rules. And it has really improved the quality of the organizations that have adopted this, like in noticeable ways things have gotten more consistent. That has been between the project partners but also organizations outside of the WAI-Tools Project that have been using the ACT Rules to make their tools, to make their testing methodologies more consistent. So, we are seeing concrete evidence that this approach seems to be working. + By writing these rules, we then have a more authoritative set of things that we know are conforming to WCAG or are not conforming to WCAG and what we’ve ended up doing is to share these rules with the W3C and to have those published. There are currently eleven of the rules that have been developed, that have been published by the W3C. So, these are really validated by the people that wrote the standard. So, this gives us a lot more credibility as well for the work that we’ve done. This is the way you’re supposed to interpret WCAG for the HTML standard or for SVG documents. + And by doing this, we resolve a lot of open questions for WCAG. One example of such a question is if you have an image with the text right next to it and the image basically says the same as the text, does that image need to have a text alternative or not? It’s kind of a debated question. Some accessibility experts say yes, some accessibility experts say no, you absolutely should not do that, and the rules answer this question for us definitively so we do not have to have these conversations anymore. It clears things up in a lot of ways. + And by clearing this up, we enable organizations to test more consistently, which – allows accessibility results to be more widely trusted. You can trust that one organization using ACT Rules will produce almost identical results to another organization using those same sets of rules. + There can still be small differences but the differences are far smaller than what they used to be. + Next slide please. |
+
Slide 4: Features of ACT | +So, just to get into some of the things that you get from ACT Rules. I think I might have been going over time already, so I’ll keep this quick. So, ACT Rules use unambiguous language, which is great because it helps you answer things in a very precise way. If you need to know what something means, how to interpret a thing, the ACT Rules offer a very precise description. So there’s no more "well, it depends" kind of thing going on. ACT Rules are very clear on when something should pass and when something should fail. ACT Rules also document assistive technologies. If something doesn’t work in some screen readers but it does in others, you can find that information in the rules, and this helps you frame how to understand the results. So, in some cases you may still see an issue if you try something with a particular screen reader that you don’t see in the report. That really helps you understand what the results actually mean. Then they document assumptions. If there are very unlikely edge cases, or if there’s a particular interpretation that may be controversial or that is unusual, then that will be documented in the rule itself, so you will understand why the result is the way it is, and that really helps you frame the results again. And then, lastly but not least important either, they are agnostic of tools and methodologies. We want ACT Rules to be something that can be implemented irrespective of whether you’re doing it manually or you’re automating. That really shouldn’t matter. It shouldn’t matter if you’re using a particular technology stack in your tool or some other technology stack. These are written in a way that everybody doing accessibility testing, whether it be fully automated or completely manual or somewhere in between, should be able to implement them. I think with that, I am passing it on to Kasper. |
+
[Kasper speaking] + Slide 5: ACT Rules Developed By The Project |
+ Alrighty. So, I will just briefly talk about the rules that we developed as part of the project and then also briefly touch on how these were actually developed. So, just for an overview, we initially set out to develop a total of seventy of these ACT Rules, of which we wanted thirty to be fully automatable, twenty-five to be semi-automatable, and fifteen to be completely manual rules. And we started out by trying to cover as much ground as possible in terms of the type of content that the rules dealt with. So, we were looking at text content, graphics, audio/visual media, all the stuff that was, in many cases, really based on the existing rulesets that the individual project partners had already implemented in their individual tools. And then later on, as the project progressed, we started seeing more and more specialized rules focused on increasingly narrow aspects of the content. We started looking at stuff like orientation locks implemented using CSS and also things like CSS declarations that couldn’t be overridden by the users. So, much more specialized failure cases than the stuff that we initially covered. We also ended up developing several rules that didn’t relate directly to WCAG but were looking at things like WAI-ARIA conformance and also its associated authoring practices that define best practices for use of WAI-ARIA. And with that, I will cue the next slide. |
+
Slide 6: How ACT Rules Were Developed | +Thank you. So, as already mentioned, these rules were developed in an open W3C Community Group, and within that group the first thing we of course had to decide, among the project partners as well, was which rules we actually wanted to develop. For this, we had a list of defined selection criteria, so we would look at things like: for a proposed rule, how many additional success criteria from WCAG does it cover that we haven’t already covered; what the, sort of, options were across the project partners and how easy it would be to implement in our tools; and also how likely it was that this was a rule that would be picked up by the community. And then once we had selected rules to develop, we would incubate them in this open community group, with input both from the project partners and from the wider community. This is really where the bulk of the work was spent, with the biggest and most important point probably being reaching a consensus on the interpretation of whatever success criterion it was that the rule was supposed to test, and also researching, like, the real world implications of what the rule would look at: figuring out under what assumptions the thing that the rule looked at would actually be a problem, and in case those assumptions didn’t hold, whether there would actually be a problem. So, really finding out where, you know, theory and reality aligned, and agreeing on an interpretation that would then form the basis of the rules that we developed. And once we had the rules developed, they were implemented by the project partners in our various tools and also validated by another project partner on a fixed set of real web pages that we had selected beforehand. And then, once implemented and validated and matured in the community, we would ship them off to the ACT Task Force to eventually be included as official live resources on the WAI webpage. And with that, I will pass it on to Carlos. |
+
[Carlos speaking] + Slide 7: Adoption and Impact of ACT Rules |
+ Thank you, Kasper. I’m going to start by talking a bit about how does this impact you, okay. So, what does it mean for you, for the community, that we have developed seventy rules? + So, first it means that we now have more than seventy different aspects about WCAG that at least three different organizations agree on how they need to be tested. Okay, so I think this is already very significant. + And when I say at least three different organizations, I’m not saying three different project partners, and this is something that Kasper just talked about. So, because the main work we’ve done in this project was done in the scope of the ACT Community Group, which means that it was an open group and its work was publicly available. So, we are not just three project partners reaching consensus. It’s wider than that. + And there were involvement at different levels from these organizations outside the project that fortunately was not just at the rule writing and reviewing level. So, not surprisingly, the project partners included in their products, the ACT Rules that we were developing. But I think we can say that we are really happy to see that other organizations started to do the same. So, the other two vendors have now products that include conformance testing using ACT Rules, manual testing methodologies that also include ACT Rules. So, from this you can understand that ACT Rules already have an impact on several tools, on several methodologies, and let me stress this out again, not only from the tools and methodologies made available by the project partners. + So, if you use one of these tools or if you have your projects checked with one of those methodologies, then ACT Rules have already impacted you, and we can also say – you will be able to hear about this in the next presentation – that these tools and these methodologies are being used by monitoring bodies across Europe. So, we are already witnessing the impact of ACT Rules at a large scale. + And we believe that ACT Rules will play an important role for the European monitoring efforts as Wilco said. So, given the consistency that the ACT Rules offer, even when monitoring agencies from different countries use different tools, if those tools implement ACT Rules, then their results can be compared with a higher degree of confidence. + We think this is already a great start but in the future, we can only hope that the impact will be even bigger. So, as also Wilco mentioned, eleven of the ACT Rules developed by the project and adopted by the community have already been published by the W3C. There are more in the pipeline that should soon increase this number and I think that the interest in ACT Rules is only bound to grow when the community starts finding ACT Rules elsewhere. + Shadi, can you move to the next slide, please? |
+
Slide 8: Understanding ACT Rules | +Thanks. So, you have a screenshot, so a sneak peek at the Understanding document for WCAG 2.2 soon to be released. And you can find there – yes, thanks, Shadi – a new section on Understanding ACT Rules and we have this section at the same level that we have currently for the Understanding Techniques for WCAG’s success criteria. So, this is really exciting and we are really seeing ACT Rules being adopted and promoted. | +
Slide 9: Getting Involved in Further work | +Okay, so now I want to talk to you about how you can contribute to keep ACT Rules going on. + So, we like to believe that this was a successful project and that we really kickstarted the work on ACT Rules but now it’s up to you, to the community to keep this momentum going. + And fortunately, there are many ways in which you can contribute. For instance, if you are a tool or a methodology developer, you can start by checking how well aligned your project is to the current consensus expressed in ACT Rules. So you can read the rules that are related to the checks that you support. You can see how well you do on the published test cases. And you don’t agree with some aspect of the rule? Great. We did try our best but we know we are not flawless. So, raise an issue reporting what you found and it will be discussed in the community and the rule will be updated if needed. It wouldn’t be the first time. + While you are at it, you can send us the implementation reports of your tools and methodologies. In the ACT Rules website, we have a section for implementations where all reports that we receive are displayed. + So, this is a great way to let the community know that your products are supported by ACT Rules. And irrespectively of your developing tools or methodologies or being a user of such or a monitoring body or someone that’s responsible for publishing accessible information on the web, be happy because there are other ways you can help. We are always looking for more people to help reviewing rules. So, it’s great to have people with different experience and perspectives contributing to this work. + If you have an idea for a rule, you can write it. Don’t worry if you feel overwhelmed about writing a rule. You can team up with others and do it as a collaborative project. That’s what we are all doing here, collaborating to make the web more accessible. + And to do any of this, you don’t have to be a W3C member, but if you are or want to become one, then you can actively contribute to support the adoption of more rules into WCAG 2.2 and potentially the new WCAG 3. + So, Shadi, I think we can move to the final slide. |
+
Slide 10: Thank you | +And you can find more details about ACT Rules and how you can join this community at act-rules.github.io. And thanks for listening and I think Shadi, now you want to take some questions, right? | +
[Shadi speaking] | +Yes. Questions are coming in. Thank you, everybody who is contributing questions. If you have questions, now is the time in the Q&A function. + Before we get to that, just to get a little bit of a feel, how many people are subscribed to the community group and roughly how many are active in the calls and, you know, the active part of the work, not just subscribed? + Who wants to take that question? |
+
[Wilco speaking] | +I’ll take it. It depends a little bit because there are two groups, one in W3C itself and a Community Group which has no official standing at the W3C but is hosted by it. The Community Group has, I believe, around ninety members right now. As for active participants, again that depends on how active you mean active. I think right now we have ten to twelve people that regularly attend meetings like every month, every twice a month, something like that. There’s a whole bunch of people that are slightly less active but that still regularly contribute. As a rule comes up that they are interested in, they’ll drop a review. That happens quite a lot and I frequently get surprised by new people who have come out of nowhere and submitted some information. | +
[Shadi speaking] | +Thanks. Yeah, I just wanted to give a little bit of an idea and people are, as you were saying, Wilco, participating in different ways. Some people are attending the calls and discussing things. Others completely asynchronously, they are using the GitHub platform. | +
[Wilco speaking] | +Yeah, it’s – it takes relatively little time for anybody to participate. You can spend as little or as much time as you want. I do also think it’s worth mentioning that just from the people that have joined over the last year maybe, year and half, I frequently hear that they join and they find it a lot of fun and they learn a lot. + It is a group where you get to learn quite a lot of in-depth accessibility. So, if you’re interested in the nitty gritty details of accessibility, this is really a good group for you to participate in. |
+
[Shadi speaking] | +Great. So, one of the questions that we have is – I’ll read it aloud – WCAG is supposed to be interpreted to be relevant in different contexts. In one country, users might be using older assistive technology for example and it would be relevant to use all the techniques to meet the requirement. + Then in a country where all users have the latest versions of assistive technology, in practice it means a solution that is good enough in Germany for example, might not be good enough in Italy for example. And that is fine. So, how do the ACT Rules take this into account? + Silence? |
+
[Wilco speaking] | +You want to pick somebody? I mean I can answer all of them if you want, but. | +
[Shadi speaking] | +Go ahead. | +
[Wilco speaking] | +Okay. So, it’s a really good question. Thank you. How do ACT Rules deal with that? ACT Rules have an accessibility support section, so if we know about these differences, we will mention them in that section. You can find information about things that may work slightly differently depending on which technology you are using. That information is documented in the rule, and if you happen to know of any that isn’t, please open an issue and we will add it. It belongs in those documents. I also think it’s worth mentioning that those differences aren’t that big anymore. The types of assistive technologies that people are using tend to be international ones. There are only a handful of big players out there, so the differences really aren’t that big these days. What does matter a little bit is whether or not you exclude some of them. So, for example, if you want to exclude a common screen reader on the Mac, you might have slightly different tests than if you wanted to include it. The rule documents this kind of information for you. That freedom is available within the rule. |
+
[Shadi speaking] | +Thank you. Kasper, I think this one is for you. So, do ACT Rules only cover accessibility tests that can be semi-automated or fully automated? Or are there also rules that clarify rather subjective WCAG criteria like 2.4.6, adding the label that needs to be manually evaluated? | +
[Kasper speaking] | +You know, I can definitely provide an answer to that. And the answer is yes, we also have affordances for manual rules. As I mentioned, we did set out to implement fifteen fully manual rules, so rules that could not reasonably be implemented in automated tools but could be implemented as part of manual test methodologies. I would say the manual rules are slightly more difficult in the sense that you still have to follow the ACT Rules Format, of course, and the ACT Rules Format is fairly strict on what kind of rules you are allowed to write. One of the big things is that the applicability section of a rule, the description of what kinds of content the rule applies to, has to be both completely unambiguous and objective. And for some of the success criteria, that objectiveness criterion has proved really difficult to meet. So, yes, you can author manual rules using the ACT Rules Format, but it may be slightly more difficult because you still have to remain objective in the applicability section of the rule, and that is difficult to do with some of the more, I would say, subjective WCAG success criteria. |
+
[Shadi speaking] | +Right, but that’s – I think an important – thank you, Kasper. I think that’s an important question because it also came up in the questions that people asked in the registration form. + I think a lot of people, when they hear rules, automatically think of automated tools only, and I think here it’s really important to emphasize that these apply to both automated and manual testing – in fact, actually, we have several rules that we created as part of the project that relate to that specific success criterion on headings and we have others on labels. So, yes – it is done, and we also have manual methodology developers who have been implementing these rules in their methodology, comparing their methodology. + Speaking of, this is a question that also came up in the registration form. Carlos, maybe this is for you. So, somebody wants to create an implementation report for their tool or for their methodology. How do they do that? Can you first explain what an implementation report is and then how they actually do that, how they provide such information? |
+
[Carlos speaking] | +Yeah, sure. So, perhaps I need to start by providing a bit of context on what’s inside an ACT Rule. Specifically, an ACT Rule, besides having an applicability and expectations, has a set of examples. + Some of them are examples of elements, documents, pages that meet the criteria and so pass the rule. Some of them are failed examples and some of them are examples of pieces of code that are inapplicable for that rule. + So, what we expect to have in an implementation report is how well your implementation fares against those examples. So, does it pass the passed examples? Does it fail the failed examples? And does it not consider the inapplicable examples of such a rule? + So, an implementation report is basically a report of the outcomes of your implementation for the different test cases that are described in an ACT Rule. + How can you submit one and how can you prepare one? Well, we expect your reports to be provided using EARL, the Evaluation and Report Language, serialized in JSON-LD. So, if you do provide us that, we can import it into our website and display it together with the currently displayed implementation reports. + We have a section on the website, so if you go online to the website, there’s a section dedicated specifically to implementations and there are instructions there on how you can submit an implementation. And we have some tools that you can use to help you prepare an implementation report. We have all the test cases for all the rules accessible, so that can help you prepare your implementation report. + If you have some problems setting up an infrastructure for preparing an implementation, do reach out to us and we should be able to assist you in that, in your project. |
+
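To make the report format Carlos describes a bit more concrete, here is a rough sketch of a single EARL assertion expressed in JSON-LD, written as a TypeScript object literal. The context URL, tool name, page URL, and rule title are placeholder assumptions, and the exact property names should be checked against the EARL specification and the ACT Rules implementation instructions.

```typescript
// Minimal sketch of one EARL assertion in JSON-LD (illustrative only).
// Property names follow the EARL vocabulary; the context URL, tool name,
// page URL, and rule title are hypothetical placeholders.
const assertion = {
  "@context": "https://www.w3.org/ns/earl", // assumed context reference
  "@type": "Assertion",
  assertedBy: {
    "@type": "Software",
    title: "ExampleChecker", // hypothetical tool
  },
  subject: {
    "@type": "TestSubject",
    source: "https://example.org/page.html", // the page that was tested
  },
  test: {
    "@type": "TestCase",
    title: "Image has non-empty accessible name", // an ACT rule, by title
  },
  result: {
    "@type": "TestResult",
    outcome: "earl:passed", // passed | failed | cantTell | inapplicable
  },
};

// An implementation report is essentially a list of such assertions,
// one per ACT rule test case the tool or methodology was run against.
console.log(JSON.stringify(assertion, null, 2));
```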
[Shadi speaking] | +Thank you, Carlos. While I have you on the line, we’re kind of running out of time. There are more questions coming so we might need to push some of these towards the end of the day just to move on. But I think, maybe briefly, Carlos, this one, it says: can you give a concrete example of the rule on whether we should have alt text on an image if the text next to the image is the same? + I think it would help to understand how you choose a specific rule. I think here, if you can explain the test cases that are part of the rules in this context, that would help people understand what the rule actually refers to. |
+
[Carlos speaking] | +Wilco, you seem to have the rules in your head better than I. Which – this is definitely coming from the example that you gave in your presentation. Can you point me? | +
[Shadi speaking] | +Sorry. We don’t actually have the time to go into this. | +
[Carlos speaking] | +Okay. | +
[Shadi speaking] | +We could come back to this later. So maybe let me summarize. So, the point is that there would be a rule that is talking about, let’s say, text alternatives for images, and it would consider different techniques, and the examples, the test cases in the rule, would have an example of what happens if there is a description next to the image. What, in that case, should the alt text be? + So, say we were to write the rule for a good alt text – the rule that we have right now only checks whether alt text exists or not. And if we would have a rule that checks how good an alt text is, we would have test cases covering different types of situations so that people could compare against those test cases. + Again, we could come back to this later on. We are kind of running out of time but one of the things that was asked is, do rules cover all of the success criteria? Or will the work continue to cover everything? + Who wants to take that one? |
+
[Wilco speaking] | +I’ll go. So, we haven’t completed all the success criteria. I don’t know if we ever will fully complete everything but we certainly intend to continue to go on with this work. The WAI-Tools Project is over but the ACT work is not. + This is going to be, you know, an ongoing project to continue to extend the coverage. Some things are not currently done – our main focus has been on HTML, as well as SVG. There are definitely still gaps in those, specifically the things that are difficult to automate, but beyond that, there are other technologies to consider as well. There are currently no rules for PDF for example. That is certainly a thing that I would love to see us develop. + So, there’s definitely more work and it is also good to mention that this is an ongoing process. As technology develops, as the HTML standard develops, as assistive technologies develop, these rules will continually get updated. So, unlike WCAG, these rules are an ongoing thing that needs to be maintained, so this work is never going to end and I’m going to continue doing this until I retire, so. |
+
[Shadi speaking] | +Yeah. I like to use the metaphor of snowballs, especially now in winter for many of us, so snowballing, and I think this project started the snowball with seventy rules and now it’s really, as Carlos was saying, now it’s up to you the community to help continue that. + Speaking of, Wilco, while I have you on the line, there was a question for you. This might be a misunderstanding. Can you talk about how authoritative the ACT Rules are versus the WCAG success criteria? What is the normative part and what is informative? |
+
[Wilco speaking] | +That’s a fantastic question. So, the WCAG success criteria are published by the W3C as a Recommendation. They’ve gone through an extensive process of public reviews by the W3C and they are an internationally recognized standard. + So, those are solid as anything. ACT Rules are written by smaller groups. They go through a less involved process of review and they get published still by the W3C but not by the whole of the W3C. There is no massive W3C-wide review but there is a review and an approval that comes from the Accessibility Guidelines Working Group which are the authors of WCAG. + So, you can see them as these are rules published by the authors of WCAG, whereas WCAG itself is published by the W3C as a standards organization as a whole, so. |
+
[Shadi speaking] | +Thanks. We call these – internally we call them supporting documents. These are the so-called Techniques documents, the Understanding documents - Understanding WCAG 2. So, with every success criterion, there is a document associated with it called Understanding Success Criterion XYZ that explains more background and so on. + Then there are several Techniques, how you can meet that requirement, and now we also have tests associated with success criteria. + These supporting documents are informative, so they’re not normative like the success criteria themselves. Those are what you have to meet, but all these supporting documents help you make sure that you meet the requirements. + Good. I will take the liberty of answering two more questions and then we’ll move on. One question was: when will WCAG 2.2 be released? That was a teaser from you, Carlos, because you mentioned that in your slides. Currently, it’s planned for mid-2021, so mid this year we expect the new version of WCAG 2.2, and hopefully with as many test rules as possible as part of the supporting documents. + And then there was another really good question. Can we expect the WCAG 3 guidelines to follow the same approach as ACT Rules so WCAG 3 success criteria can be tested more objectively with automation where relevant? + This is a really good question because just last week on Thursday, fresh, hot off the press, we, the W3C, published the first public draft of WCAG 3.0. This is still a very, very early draft and we would love your comments. We, of course, want to make WCAG 3.0 more testable and so we want to look at as many approaches and as much carry-over as possible from the ACT Rules. At the same time, WCAG 3 has a very new conformance model. So, they are looking at different ways of evaluating, so not just binary but maybe on a scale. This is maybe a topic for another webinar about WCAG 3.0 but the thing is, yes, we are working closely to try to improve the testing in future versions of WCAG. |
+
Visual | +Audio | +
---|---|
[Shadi speaking] + Title Slide: Portuguese Observatory |
+ The Portuguese observatory. So, Jorge, do you want to introduce yourself briefly and then please get started. And you are muted. | +
[Jorge speaking] | +Sorry. The button changed the position of the mic. Let me also turn on my camera so you can see me. + Okay. Hi, everyone. I'm Jorge Fernandes from the Portuguese agency AMA, the Administrative Modernization Agency. We are responsible for the monitoring of web accessibility in Portugal. We are the body responsible for that. We belong to the Ministry of the Modernization of the State and Public Administration. And, well, I've been working in the field of web accessibility since 1999, so since the beginning of WCAG 1.0. And I'm here to present to you the Portuguese observatory. |
+
Slide 1: An Observatory to Raise Awareness ... | +I think I can continue now, Shadi? Okay. In the -- sorry. I will try to get the connection with my slides. Okay. Next one. So in the Portuguese legislation, we explicitly have a method of evaluation based on the simplified method of the Directive. In the preamble of the Implementing Act about monitoring, I underlined these two ideas: the monitoring should raise awareness and encourage learning in Member States, and the overall results of the monitoring activities should be made publicly available. This must involve all entities, all public sector bodies. And the monitoring is not an external exercise for them. So it's something that they need to do by themselves. | +
Slide 2: The Directive and PT Transposition | +That is one of the reasons why we in Portugal, beside the centralized model of evaluation, have a method of three steps for each entity to evaluate. The simplified method, based on automatic and semi-automatic tools. Then what we call an "in-depth light" evaluation, a manual evaluation based on a checklist that we call the ten critical aspects of functional accessibility, a kind of W3C Easy Checks. + And, according to our legislation, these two references, these two methods of evaluation, are mandatory, and usability tests with people with disabilities are also recommended, with a minimum of one task and one user typology of the European standard, you know, the EN 301 549. |
+
Slide 3: Simplified Method Based in Evaluation Tools | +About the simplified method, as you know, in the Implementing Act, the page sample needs to have a number of pages appropriate to the estimated size and complexity of the website. This is the main principle mentioned in the Directive. + In the Portuguese proposal, the page sample is composed of the home page plus all pages linked from the home page. From historical studies in Portugal, we know that this means about 60 pages per website in public administration. + We evaluate with automatic tools against WCAG 2.1 AA. And the public sector body can use any kind of tool, but our team will load all the samples into the Portuguese observatory, and all of them are public. |
+
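As a rough illustration of the sampling approach Jorge describes (home page plus every page it links to on the same site), the sketch below collects such a sample in the browser. The URL is a placeholder, and a real monitoring crawler would handle redirects, non-HTML links, and other cases this ignores.

```typescript
// Sketch: build a page sample from a home page plus all same-origin pages
// it links to. Illustrative only; not the actual observatory crawler.
async function buildSample(homePage: string): Promise<string[]> {
  const response = await fetch(homePage);
  const html = await response.text();
  const doc = new DOMParser().parseFromString(html, "text/html");

  const sample = new Set<string>([homePage]);
  for (const anchor of Array.from(doc.querySelectorAll("a[href]"))) {
    const url = new URL(anchor.getAttribute("href")!, homePage);
    url.hash = ""; // ignore fragments
    if (url.origin === new URL(homePage).origin) {
      sample.add(url.toString());
    }
  }
  return Array.from(sample);
}

// Example usage (hypothetical URL):
// buildSample("https://www.example.gov.pt/").then((pages) => console.log(pages.length));
```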
Slide 4: Portuguese Evaluation Tools | All our tools use the same engine, QualWeb, which Carlos already mentioned. We have AccessMonitor. That is a tool that produces a web accessibility report for one page. That is also on our website. And the observatory is an awareness tool with global statistics of the entities. If an entity wants to know more details, they need MyMonitor. So the observatory doesn't give the detailed information -- all the information of the sample. To do that, the entities need to have access to another tool. That is MyMonitor. So we give them MyMonitor, and, of course, we include them in our contact network of people working in the field. | +
Slide 5a: Impact of the ACT-Rules | +So, the impact of the ACT Rules on our tools. Until now we have changed 20 of our 80 tests, based on 13 ACT Rules. We get a more comprehensive analysis, a post-processing browser analysis -- at the moment, our tool sees more HTML elements. Our tool is also ready to be translated into other languages. The output is in EARL and in CSV format. We can output all the results in these two formats. + The tools are open source. All the source code is on GitHub. You can find it at amagovpt.github.io/eed. That is the place where you can find the source code. |
+
Slide 5b: Impact of the ACT-Rules | +So the biggest impact of the WAI-Tools Project is in the test rules. Just to mention two examples, the headings and the images that we already spoke about in this presentation today. Well, talking about the headings, in the past we had the traditional H1 to H6 headings. We detected those. Now we detect more headings, because we have the traditional ones and the new ARIA-based ones that I mention here: the role="heading" attribute in conjunction with the aria-level attribute, with levels 1 to 6 and maybe even more levels. So all of them are now detected by our tool. | +
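As a simplified illustration of the extra headings mentioned here, the sketch below gathers both native h1 to h6 elements and ARIA headings (role="heading" with aria-level) from a page. It is not the actual QualWeb implementation, only a minimal browser-side example of the two patterns.

```typescript
// Simplified sketch: gather native and ARIA headings from a document.
// This is not QualWeb code; it only illustrates the two patterns discussed.
function collectHeadings(doc: Document): { element: Element; level: number }[] {
  const nodes = doc.querySelectorAll('h1, h2, h3, h4, h5, h6, [role="heading"]');
  return Array.from(nodes).map((element) => {
    const ariaLevel = element.getAttribute("aria-level");
    // For native headings the level comes from the tag name (h1..h6);
    // for role="heading" it comes from aria-level (defaulting to 2 per ARIA).
    const level = ariaLevel
      ? parseInt(ariaLevel, 10)
      : /^h[1-6]$/i.test(element.tagName)
        ? parseInt(element.tagName.charAt(1), 10)
        : 2;
    return { element, level };
  });
}

// Example usage in a browser console:
// collectHeadings(document).forEach(({ element, level }) =>
//   console.log(level, element.textContent?.trim()));
```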
Slide 5c: Impact of the ACT-Rules | +About the images, in the past we only analyzed the alt of the images. Now we look at four different attributes where the alternative text can be provided: aria-labelledby, aria-label, alt, and also the title attribute when it is used alone. | +
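To illustrate the four attribute sources listed above, here is a deliberately simplified lookup of an image's text alternative. The full accessible name computation used by real tools handles many more cases; this sketch only shows the precedence of the attributes mentioned.

```typescript
// Very simplified text-alternative lookup for <img> elements.
// Real tools follow the full accessible name computation; this sketch only
// covers the four attribute sources mentioned in the talk.
function imageTextAlternative(img: HTMLImageElement): string | null {
  // 1. aria-labelledby: concatenate the text of the referenced elements.
  const labelledby = img.getAttribute("aria-labelledby");
  if (labelledby) {
    const text = labelledby
      .split(/\s+/)
      .map((id) => img.ownerDocument.getElementById(id)?.textContent ?? "")
      .join(" ")
      .trim();
    if (text) return text;
  }
  // 2. aria-label
  const ariaLabel = img.getAttribute("aria-label")?.trim();
  if (ariaLabel) return ariaLabel;
  // 3. alt attribute
  const alt = img.getAttribute("alt");
  if (alt !== null) return alt.trim();
  // 4. title attribute used alone
  const title = img.getAttribute("title")?.trim();
  if (title) return title;
  return null; // no text alternative found
}
```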
Slide 6: QualWeb Engine | +If you want to try all the ACT Rules that we have at the moment, you can check QualWeb. That is at qualweb.di.fc.ul.pt. So, qualweb.di.fc.ul.pt. And there you can try all the 67 rules that we have at the moment out of the set of 70. | +
Slide 6: Demo of the Observatory | +So let's try a demo of the observatory. And now is the moment of David Attenborough. I will put running a video. Let me try. | +
Video in slide playing | +Let's try a demo of the observatory. The observatory is organized from the big picture down to the drill-down of one entity. So the first page of the observatory has the big picture. A global score from 1 to 10, where ten means good practice. The total number of categories, the total number of websites and pages, a ranking table of the categories with the number of pages conformant by level. + We started by entering the 308 Portuguese municipalities with the sample of the home page plus all pages linked from the home page. We have more than 50,000 pages introduced at the moment. We have data graphics such as a histogram of scores and the ten most frequent errors. + Let's go now to a category, for example, the category of the municipalities. Second level, the level of category or directory, in this case, the municipalities. The same organization as the first level, with the score, the statistical graphics, the total number of websites, the ranking table of entities. + Let's see the municipality of Murca. An average score of 9.9, the total number of pages, 243, three pages conform with level A, and 226 pages conform with AAA. It's a good one. Let's see the Murca municipality in detail in the third level. Again, the same organization of data. Histogram, the ten most frequent errors, the top five errors by conformity level, an accessibility plot in a radar graphic. The fuller the circumference is, the better. And a table with detailed error distribution. Work in progress. An observatory with more info and with a new look. The evolution of page conformity on the left graphic, the accessibility plot on the right. The histogram of scores. The bad practices, but also the board of good practices with the better practices by WCAG level. And, also, the detailed distribution of all the better practices found. + Let's try a demo of the observatory. |
+
Slide 7: The Accessibility Statement | +And now I also have a demonstration of our accessibility statement, which is based on the WAI-Tools accessibility statement. We have a crawler to pass through all websites of the public administration, and we can collect information about the conformity level that is mentioned in the accessibility statement. And our goal is to collect all the information, because our accessibility statement is machine readable, so it is possible with a crawler to get this information automatically. + And I will also show you what we can do with the machine readability, for example taking an accessibility statement that is already published and using it to create my own accessibility statement. Let's see another demo. |
+
Video in slide playing | +A demo of the Portuguese statement generator. Technically the Portuguese generator is based on the WAI-Tools generator. Imagine that you are navigating ePortugal, the citizen portal of Portugal, and you want to find the accessibility statement. You know that if you use the suffix "accessibility" in the URL, "acessibilidade" in Portuguese, you are supposed to find the accessibility statement. And here it is, the accessibility statement, with the references of the analysis done, following the model of the Directive and the Portuguese legislation. + At the bottom of all statements, we find a link to the generator used to create it. My goal is to create the accessibility statement for my website. I can use the button "upload by URL." I can create my accessibility statement based on the one from ePortugal. I enter the URL of the ePortugal statement page, I press OK, and it is done. And I have the form with all the data of the ePortugal accessibility statement. + Now I change the data according to my organization and website, press the button "Preview and Download," and the statement already has my data. Well, more or less, because in this demo I only changed one field, the name of the organization. Then I press the button "Download HTML." + I get the HTML format. Only the structure of the document, not the styles. I copy and paste this HTML code to my website. And when I do that, my accessibility statement gets the style of my site, and everything is in place. And we have another machine-readable accessibility statement, in this case based on the ePortugal one. |
+
[Jorge speaking] | +Okay. So that was a demo of the Portuguese statement generator. At the moment, with the crawler -- from data from today -- we know that we have 32 accessibility statements: 16 compliant, eight partially compliant, and eight non-compliant. That is some of the data that it is possible to get automatically from the accessibility statements. | +
Slide 8: Thank you | +So if you want to see live the Portuguese observatory, you can use the URL observatorio.acessibilidade.gov.pt. So observatorio, O B S E R V A T O R I O. + And that's it for my presentation. Thank you very much. |
+
[Shadi speaking] | +Thank you, Jorge. Yeah. So, actually, we have a question right away. The question is -- | +
[Jorge speaking] | +Don't say. Yes. | +
[Shadi speaking] | +If we want to install the testing service to our own server, do we need -- oop, and I moved -- do we need the QualWeb core in GitHub only, or what is the purpose of the monitor service, et cetera, repos in ama.gov.pt repos? So this is a bit of a technical question. | +
[Jorge speaking] | +Yeah. Maybe Carlos can help me. | +
[Shadi speaking] | +About the installation of this. Somebody wants to use it locally. Carlos? | +
[Carlos speaking] | +Yeah, sure. I can help, definitely. You don't need to install any QualWeb related service to have the observatory and the tools in the observatory ecosystem working. The monitor server that you mentioned in that question includes the QualWeb core. So everything that requires an accessibility evaluation is handled through the monitor server, which is also tasked with translating the outcomes of QualWeb into the formats that the tools in the PT observatory ecosystem use. So everything that you need to have your own copy of this ecosystem is available from the AMA GitHub repository. | +
[Shadi speaking] | +Right. So that's one particular aspect is that this entire observatory and all the tools are completely open source, and I'm sure they welcome contributions as well. + Now, Jorge, another question was: I don't understand why a new accessibility statement can be made by copying a published -- an existing accessibility statement. Can you please clarify? |
+
[Jorge speaking] | +Yeah. You know, one of the things when you need to fill in the accessibility statement is to understand what goes in the different parts of the statement. So when you can visualize a final declaration that is already filled in, we know that this also helps to adapt and fill in our own declaration. So it is helpful to do that, and also, when you already have the declaration on your website and want to make changes, you can use the same method that I used here in the demo to make changes to your own declaration. So it is a question of making it easier to understand how to fill in a new declaration, and it also helps to modify our own declaration. | +
Visual | +Audio | +
---|---|
[Shadi speaking] + Title Slide: Reporting Support Tools |
+ For the reporting support tools, I would like to call Eric Velleman from the Accessibility Foundation to come to the virtual stage. Eric, please introduce yourself and take it away from here. | +
[Eric speaking] | +Okay. So, hi. I'm Eric Velleman. And let me first start up here on my desktop here. So I hope you all see this. That should be it. + So I'm Eric Velleman. I'm from the -- I work at the Accessibility Foundation in the Netherlands. We worked on the ACT Rules and on the reporting support tools. So I'll be presenting the two tools that we worked on recently. Both tools are undergoing an update. And in this short -- the two short presentations that will follow now, I'll show you what the updated tools will look like. And probably -- and I hope you can see them soon on the W3C website. |
+
Slide 1: Web Accessibility Reporting Tool (1) | +Okay. So first the WCAG EM Report Tool. So it used to be the -- does everybody see this? | +
[Shadi speaking] | +Yeah, we see your screen, Eric. | +
[Eric speaking] | +Okay. Good. So it used to be this left thing here, which is if you still go to the website, you will still see this one. And in the meantime, we've been working on this one, the one on the right. So it's the updated version. | +
Slide 2: Web Accessibility Reporting Tool (2) | +And to get an overview, it can be found at w3.org/WAI -- W A I -- /eval/report -- how do you call that small line -- dash -- tool, forward slash. + And what we did is we followed the procedure to evaluate websites in the reporting tool. It helps you generate a report according to WCAG EM. It supports and links to relevant information and tools. It aligns with the WCAG conformance claims. And it supports WCAG 2.0 and WCAG 2.1. It doesn't support WCAG 3 yet. And it can also show the difference between the two. So if you had a previous one in WCAG -- a previous evaluation in WCAG 2.0, you can just add -- just only show the WCAG 2.1 success criteria. |
+
Slide 3: Web Accessibility Reporting Tool (3) | +It is a manual tool, so it doesn't do any automated checks by itself, but the new version does have the possibility to import automated checks. So if you use Axe or Alfa by Siteimprove, you can import the results as long as they comply with the JSON standard for the import. + It has the possibility to open and save input, so you can exchange results. So at the end of an evaluation, you can sort of save your evaluation, send it to a friend, and the friend can import the evaluation and continue with it or check it and send it back to you or -- well, in this way you can work with multiple people on an evaluation as long as you don't work on it at the same time. + It supports and links to relevant information and tools. I think I said that in the previous slide. It has an easy translation file. So it used to be rather complex to translate, but now it has an easy translation file, so that makes it easy. There is an English file. If you translate everything that is in English to your language, you will have your own language file or your own language version of the reporting tool. + It has changed to the new WAI design and framework, and it has the possibility of importing data from tools using the EARL format. + So now let's shift to the live demo. It should be this one. Yeah. |
+
Live demo of Website Accessibility Evaluation Report Generator | +So this is the live demo of the tool that is currently still on GitHub. And we are working on it, so you will see that you have the same walk through here. So the Overview, Scope, Explore, Sample, Audit, Summary, and View Report are the menu items. And it explains you how the tool works. It gives tips for using it. If you go to it, at the right there's sort of a menu bar. You can view the reports, start a new evaluation, open an evaluation from a JSON file, or import data from automated evaluation tools. + So to start a new evaluation, you just click the button, and then you go to the first page, Scope. Well, here you can -- this is not really different from the previous version, only that it also has a WCAG 2.1 and it works a bit better. It has all the parts that were in the previous tools except, of course, the nice part is this one. I should show you the translation file here. So I should push Netherlands. You'll get the Netherlands version of the tool, which is, of course, great. If you want your language there, you just translate the file, and ask somebody at W3C to put it in the right folder. And then all of a sudden, you will have your own language web accessibility evaluation report tool generator. + Here, the sample. You can audit the selected sample, which is, like, here with perceivable. You can sort of declare something as passed, failed, cannot tell, not present, or not checked, et cetera, for all the guidelines and success criteria. You can add information for a summary, like the name of the evaluator, the evaluation commissioner, the date, executive summary. And then, finally, you can sort of view the report. Multiple buttons to do that. And it will generate the complete report with all the things that you just input in the previous pages. + And on this page is the interesting button. Save the evaluation data as a JSON file. So if you click it, you'll see here at the bottom it saves an evaluation.json. And if you go to the first page, you can then open the evaluation from JSON. I won't do that here because you'll see all the documents on my computer, but trust me, it works. + That's the website accessibility report generator live. But, once again, great translation. |
+
[Shadi speaking] | +So you just changed from English to Dutch. Thank you, Eric. + So there are already a couple of questions and just some additional visual description. So this is not the final design of how it will look. It's the tool Eric read out the URL for. And you can find that URL also from the project page and from the agenda, the report tool. We're updating it primarily to support translations more easily, but also to put it in the new WAI style. Many things here are not going to be the final way it looks. It's still a work in progress. And so, yeah -- yeah. + There are several questions, Eric, that -- if you can stop moving around, because it's distracting me. |
+
[Eric speaking] | +Sorry. | +
[Shadi speaking] | +Moving content is distracting. | +
[Eric speaking] | +Oh, sorry. | +
[Shadi speaking] | +Yeah. So can you clarify the difference between open and importing data? | +
[Eric speaking] | +Yeah. So open is to open a file. So I just showed you at the end of the evaluation you can save your evaluation report in the form of a JSON file. And somebody else can open your report. + So an import, that is to import data from automated tools. So like Axe, for instance. So you can use a tool to evaluate a website, and then the evaluation results can be imported into the WCAG EM Report Tool. The small angle there is that the only thing that will be imported currently are the URLs that are in the sample. So it will choose from all the URLs that you have checked with the automated tools. It will only use the results of the pages that are in your sample at the moment. |
+
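A rough sketch of the import behaviour Eric describes: only the automated results for pages already in the evaluation sample are kept. The AutomatedResult shape used here is an assumption for illustration, not the report tool's actual data format.

```typescript
// Sketch of the import behaviour described above: keep only the automated
// results for pages that are already part of the evaluation sample.
// The AutomatedResult shape is assumed for illustration, not the real schema.
interface AutomatedResult {
  pageUrl: string;
  criterion: string; // e.g. "1.1.1"
  outcome: "passed" | "failed" | "cantTell" | "inapplicable";
}

function filterToSample(results: AutomatedResult[], samplePages: string[]): AutomatedResult[] {
  const sample = new Set(samplePages);
  return results.filter((result) => sample.has(result.pageUrl));
}

// Example: only the first result survives because only that page is sampled.
const imported = filterToSample(
  [
    { pageUrl: "https://example.org/", criterion: "1.1.1", outcome: "failed" },
    { pageUrl: "https://example.org/contact", criterion: "1.1.1", outcome: "passed" },
  ],
  ["https://example.org/"],
);
console.log(imported.length); // 1
```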
[Shadi speaking] | +Excellent. Thank you. And then some more technical questions regarding the underlying data. Somebody is concerned. The underlying JSON data, will it stay the same in the new version? | +
[Eric speaking] | +Yeah, as far as I know. You know, I'm not sure about that. So you mean if you have JSON data from the previous tool? | +
[Shadi speaking] | +Yeah. No. The reports you have so far will stay valid. So, yeah, so the functionality, the importing and the opening and closing will be the same, and the JSON format will remain unchanged. + Okay. Another question that came also in the -- similar question was on the registration. Can you export -- so we can now export an HTML and in JSON format for machine readable. Are there other formats, for example, pdf or Word or Excel or something? |
+
[Eric speaking] | +No. No. Just HTML as far as I know and JSON. | +
[Shadi speaking] | +Right. We do have GitHub where you can put wish list requirements or you can open issues. Also, if you find bugs, please do report these, and we will -- yeah, we will see what we can do in the future. But right now, yeah, we do not export to other formats. + There was a question: Can you show an example of importing an Axe EARL report? So this was about reporting from -- importing from automated tools. So on -- I think you said you don't want to show the files on your computer, which is fair enough. I think the best we can do is there's -- the EARL GitHub -- and I'll give the URL later to the EARL GitHub. I think it should be linked from the WAI Tools Project page as well, and there -- there should be some sample reports there that you can use from there. + Okay. Eric, does the report generator evaluate web or mobile apps? |
+
[Eric speaking] | +Web. | +
[Shadi speaking] | +And it's WCAG, so it's independent. So, actually, the important thing is this is not an automated tool, so it actually uses all the criteria, so you can use it just as well to create reports for the apps. | +
[Eric speaking] | +At Accessibility Foundation, we use the same tool to evaluate apps. | +
[Shadi speaking] | +Right. Good. Good to hear. Wow, there's a whole bunch of questions coming in. We want to move on. When will this new version be released? That's a really good question. Yeah. We hope in the coming week or two. So please hang on. We'll let you know as quickly as possible. At the end of the presentation today, at the end of the session, I'll be giving you information on how you can receive updates, how you can be notified, and so we'll let you know as soon as the new version is available. + Another question: Austria and Spain use the tool for monitoring bodies and make changes. Is there any exchange between your organizations? So, yes, we did actually ask for inputs through GitHub. And I believe we got comments from Austria, maybe directly, maybe indirectly. And so, yeah, we developed this, you know, openly. Of course, we always love more input so that we can consider any requirements in future versions. +Then another question. Would it be easy to add non-WCAG criteria to the report generator? For example, other clauses from the EN, the European standard of the Directive. |
+
[Eric speaking] | +Well, I mean, if you have a programmer then I think it would be easy maybe. But it's -- yeah. | +
[Shadi speaking] | +It's open source. So, yeah, you would be -- yeah. It's -- | +
[Eric speaking] | +All on GitHub. | +
[Shadi speaking] | +Not out of the box, but you should be able to adapt the -- okay. And then last question before we move to your next presentation, Eric. We often have multiple analysts working on evaluating a site at the same time. Is there any way to break out a page for evaluation and upload these results back to the main evaluation so they could work in tandem? So -- I understand the question as: somebody evaluates one part, the other evaluates other parts, and then we can combine the reports. | +
[Eric speaking] | +No, not if you do that simultaneously. So if somebody stops evaluating and another takes over, then you can send each other the JSON file, and the other one can continue. But if you -- as far as I know, if you both do the evaluation and you import the file, I'm not quite sure what happens then. It could be that it is added. Hmm. I'll put it on the list of things to check. | +
[Shadi speaking] | +Right. Yeah. And then information in Germany, we also use the tool and make changes, but we're still at work. So, obviously, using that and making changes to it, yeah, welcome to -- we also welcome translations | +
[Eric speaking] | +Yep. | +
[Shadi speaking] | +For all resources that you've talked about. There were some questions on translations that I'll come back to later again in the Q&A. Yeah. So -- oh, and somebody says that at the moment they are adding an Excel converter. So that's nice to hear that there's something like that. Again, this is an open source project, so you're always welcome to contribute things back into GitHub if you want to. + There are several issues in GitHub of people asking to have this collaborative, for example, so that multiple people can edit at the same time. We know that this is a feature request, but that's actually quite a project on its own. For now we started really with just this import, to allow that, when you select a sample of pages from your site and you have automated results, you can fill in what the tool knows, but you still need to continue doing the manual part of your work. But it helps combine automated and manual testing. + Okay. We're a bit behind schedule, Eric, so please go ahead and continue. |
+
[Eric speaking] + Title Slide: Accessibility Statements
+ Okay. Let me get my presentation back. Get you back. Share screen. So there we are. + The accessibility statements. Well, this is a shorter story because we did a lot more work on the evaluation tool, and not just adding translation -- the possibility of easy translation there, but also adding a lot of functionality like the imports and like the saving and opening files and to be able to share evaluations. |
+
Slide 1: Accessibility Statement Generator (1) | +So for the accessibility statements generator, it is still the version that you can see live on the W3 website. If you search for statements generator on W3, you'll find it immediately. It's at W3.org/WAI -- W A I -- /planning/statements and slash. | +
Slide 2: Accessibility Statement Generator (2) | +And there's a page there to generate an accessibility statement, with a lot of information and the Preview button, but you still have to fill in everything by hand. So what the generator does is it guides you on providing accessibility statements. It is aligned with the EU Web Accessibility Directive and with WCAG 2 Conformance Claims. It is a manual generator, so this one does not do any automatic checks. But you could link to the evaluation reports or to a JSON file that you output from the previous tool. + The output is available in both human and machine readable formats. The tool includes examples of statements, guidance on how to make accessibility statements -- most people are, like, what is important for people with disabilities? What would they like to see in a statement like that? -- and a statements generator tool. It includes input from communities and experts, and it is almost ready for translation to your language, because we're still working on the last bits of that. So, also, in, like, a week or two, this should be done. And then you could just take one file, translate everything in that file, and you would immediately have your language version of the accessibility statements generator. The only thing that has to be done is that somebody at W3C will then have to put the translated file into a folder. |
+
Slide 3: Accessibility Statement Generator (3) | +So why would you provide an accessibility statement? Well, that is to show users that you care about them and about accessibility, to provide them with valuable help, for CSR reasons, or it may be a requirement, like in Europe for the Directive, for public sector bodies. + And what does the tool then do? Well, the tool asks you the questions you need to answer. So it asks you for basic information, your efforts, technical information about the accessibility, and about the approval and complaints process. It helps create an accessibility statement that can be further customized and branded, et cetera. So you can just save the end result and make your own, more beautiful version out of it. + It helps make accessibility statements conformant with the EU Directive. And the output is not for lawyers, but for users of your content. So make sure they understand so they know what to do. That was the idea behind the accessibility statement generator. + And the future work is the translations. We're working on it now. And we also looked into the possibility to add your own questions. That was one of the issues. So if you go to the GitHub page, you'll find a lot of proposals there for changes to the accessibility statements generator. We had to prioritize. So besides a few other things, the most important thing was the translations. But all the code is there. So, also, in this case you could take the complete code and add your own questions. And I think in the case of the accessibility statements generator, that would really be a fairly simple thing to do. I think even if you're not really extremely technical you could probably do that. |
+
Live demo of Generate an Accessibility Statement | +Okay. Now for the live demo. Let me go here. So this is the live page on the W3 website. And it's at W3.org/WAI/planning/statements. And then if you go to /generator and then slash -- well, then you will get to this create page, where it asks you basic information like the name of your organization, which could be like this, and then a telephone number or address of your website. That's not a correct one. Over here the standards applied, the conformance status, the additional considerations. Don't send them me. Et cetera. + So there's the efforts you did, like organizational measures you took or any other measures, you can add them here. Technical information like the accessibility limitations, compatibility with user environment, et cetera. So it's all -- this is all described in detail. If you show info here, below every header or form fields, there is -- or above every form field, there is more information about what you could fill in there. The technologies used, et cetera. + Then somewhere at the bottom you can preview your accessibility statement, and it takes you to your accessibility statement. So I haven't filled in anything, so now it's website name. But if I would go back, let's see here. I will put at least a name in here. Oh, I did put the name in here. Terrible things of life. Okay. Well, here it is. So I probably forgot something. It does the description of the name. So it's a few forms later. So it will generate -- everything that you fill out will be appearing below here. So that is why it is currently very short, but the more information you input, the more information you get in this accessibility statement. That's it. + And then above here -- of course, before I say that's it, I should have said somewhere here above you will soon see this translation button where you can get the file, translate it to your language, and then see your own language version of this accessibility statement. Yeah. |
+
[Shadi speaking] | +Thank you, Eric. Yeah. So right on time. We have a couple of questions that I think are a little bit beyond the scope of this project, but they're really good questions. + So one question is: I've seen public sector websites publishing accessibility statements only to conform with regulations. In those statements they say that some parts are not accessible. What's the use of having such statements? + So, yeah, that's a really good question. And this is one of the reasons why, you know, in addition to the generator, we have guidance, including examples of accessibility statements, to really try to promote good practice. But, yeah, this issue, I guess, of what people do or don't do is a little bit beyond our scope. + Another question that Jorge might also be interested to weigh in on. It was saying: I was interested to learn that in Portugal there's a proposed standard URL path for accessibility statements. So on a public website, if you write "acessibilidade" in the URL, you should get the accessibility statement. Are there more countries with such standard locations? + Eric, first, are you aware of other countries that have a similar practice? |
+
[Eric speaking] | +Yeah. I know that the Netherlands has a standard location where you can find all the accessibility statements. That is at toegankelijkheidsverklaring.nl. And you can find over 2,500 toegankelijkheidsverklaringen there. And that is also the place -- sometimes they're not even on the website, or you can't find them on the website of the public sector body, but you can find them on this website that is hosted by the central government. | +
[Shadi speaking] | +Jorge, any other thoughts on this? | +
[Jorge speaking] | +Well, I don't know other countries where they use also the standard URL. But, yes, in Portugal we are using this one. For example, I know that in Italy they centralized the accessibility statement in a website, and you make a link to that website. The accessibility statement is not on the website of the -- of the public sector bodies, so it is outside, centralized in a server managed by the national authority. But I don't know other -- or other examples. | +
[Shadi speaking] | +Thank you. Yeah. And that goes directly to the next question. Do all countries adopt this statement generator, knowing that some countries in the EU have additional requirements, for example, France and Netherlands? The latter also has its own generator. And then there is the Dutch name, which I cannot read. | +
[Eric speaking] | +Toegankelijkheidsverklaring.nl. | +
[Shadi speaking] | +Yeah, be nice to the captioner, Eric. | +
[Eric speaking] | +Oh, that’s trouble. | +
[Shadi speaking] | +Having a match. | +
[Eric speaking] | +Woops. | +
[Shadi speaking] | +Okay. So we do not -- yeah. We provide this accessibility statement generator as an example of the minimum requirements in the Directive. Also something internationally -- this is not only for Europe, but about providing accessibility statements as a best practice internationally. And the expectation is whoever wants to can take it and adapt it. So this is why Eric mentioned that you can actually add additional questions, so we tried to write the code in such a way that you can extend this basis, and also translations, so that you can use this separately. + So, yeah, it's a tool to help. Some have reused it, like in Portugal. Others have not. And it's totally up to -- you know, to people what they do with it. We did learn from Portugal that the translation was not as easy, and this is why we're redoing that, to support translation. +Good. And then somebody put the Dutch URL again. I think somebody wants to have fun with Dutch. Good. Thank you very much, Eric. |
+
[Eric speaking] | +Sorry. | +
[Shadi speaking] | +Dutch lesson in the next webinar. Let's now move on. | +
Visual | +Audio | +
---|---|
[Carlos speaking] + Title Slide: Test Data Browser |
+ All right. So as Shadi mentioned, earlier in the project we tried to think of ways that we could use all the data that might come out of tools that implement the rules and other open data formats, and it was one of our goals to propose a prototype that could show how we could browse the large amounts of accessibility testing data that might become available in such a future. | +
Slide 1: Objectives | +So for this, we tried to look at this from the perspective of someone that's responsible for monitoring efforts, and so we asked ourselves: How can we leverage the support of open data for those monitoring bodies? And we came up with this prototype, and we emphasize this. It is a prototype. It's not something that's production ready, but we developed this as a way to support future explorations of what can such a tool offer. | +
Slide 1: Objectives | +So let me start by addressing what types of data we considered for this prototype. So to begin with, we need to get data from somewhere, and we can see there are two sources. First, the accessibility evaluation reports that we have been hearing about. These evaluation reports are written using EARL so that we can import them. So anything that's an output of an Axe evaluation, an Alfa evaluation, or a QualWeb evaluation -- all tools that can output their evaluation reports in EARL -- we could import into this browser. + And, second, all the accessibility statements. We included statements that use the open format, both the one that Eric just presented, which was developed as part of the project, and also the one that we adapted to be used in Portugal. + And from both of these sources, we collect an interesting variety of data. So we extract from these sources data on success criteria, on the elements that have been assessed, on the ACT Rules that were used (if the tool uses ACT Rules), the assertions that were made, and the outcomes of those assertions. And so that we can browse this, we have also added a set of metadata that can be used to categorize and support this evaluation data. So here we can see the continent, the country, the category of the website or mobile app owner -- so whether it's a private or a public entity -- the sector where the entity operates, and even which evaluation tool was used to produce the report. |
+
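To make the kind of aggregation behind these views more concrete, here is a small sketch that counts assertion outcomes grouped by one metadata field such as country or sector. The record shape is invented for illustration and is not the prototype's actual data model.

```typescript
// Sketch: aggregate assertion outcomes by one metadata dimension
// (continent, country, sector, ...). Data shapes are illustrative only.
type Outcome = "passed" | "failed" | "cantTell" | "inapplicable" | "untested";

interface AssertionRecord {
  country: string;
  sector: "public" | "private";
  outcome: Outcome;
}

function countByGroup(
  records: AssertionRecord[],
  groupBy: (r: AssertionRecord) => string,
): Map<string, Record<Outcome, number>> {
  const groups = new Map<string, Record<Outcome, number>>();
  for (const record of records) {
    const key = groupBy(record);
    const counts = groups.get(key) ?? {
      passed: 0, failed: 0, cantTell: 0, inapplicable: 0, untested: 0,
    };
    counts[record.outcome] += 1;
    groups.set(key, counts);
  }
  return groups;
}

// Example: group a few records by sector.
const bySector = countByGroup(
  [
    { country: "PT", sector: "public", outcome: "failed" },
    { country: "FR", sector: "public", outcome: "passed" },
    { country: "PT", sector: "private", outcome: "passed" },
  ],
  (r) => r.sector,
);
console.log(bySector.get("public")); // { passed: 1, failed: 1, ... }
```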
Slide 2: Features | +And we designed a number of visualizations in the tool. So you will be able to see -- we'll do a short demo after the slide. So most data is presented using bar charts that are grouped by the assertions or the success criteria. + Besides the bar charts, we have a table. We provide access to a table with the equivalent data of the bar charts, but we made an effort so that all the charts are keyboard navigable, and we also include a timeline view so that it's easier to get a grasp of how the different metrics have evolved over time. |
+
Live demo of PLACM | +And I think it's best to illustrate the different ways that we can interact with the demo, with the data, by showing you a demo. So let me switch to new -- a new window. Okay. And so now this is the -- the initial page that you access when you enter this tool. + Here we have -- let me start with a disclaimer. All the data that's being presented in this demo was randomly generated, including the name of websites and the evaluation tools, so everything has been randomly generated. And the volume of data that we have here is representative of the number of evaluations that a country the size of Portugal can expect to have in their monitoring process. + So as I was saying, this is the initial visualization. You get a chart grouped by pages that have been evaluated over different continents. We start at the continent level. You can include information about the assertions, the passed assertions, the failed assertions, the ones that the tool can't tell if they have passed or failed, the inapplicable ones, and the untested ones. So similar to what Eric just showed previously. + You can, instead of showing assertions, show the information based on success criteria. So passed, fail, can't tell, inapplicable, and untested criteria. And let me go back to assertions and move this Zoom window to another place and show you the data table that we have. Basically, the same data that you have in the bar charts can be presented in the tabular format. + Okay. So if you want to see data grouped by country, you just pick that on that sidebar. And now that's -- instead of just seeing, okay, like, seeing everything on the sector page, I just want to know information about the two -- the testability of the private and public sectors in the British Indian Ocean territory. So I just select that bar, and I can drill down on sector information on the model window that shows up. And now I have the information about just the British Indian Ocean territory grouped by private sector and public sector, and I can compare that. + So if I want to see all data about sectors, I can just press there. And I see, okay, I have over 4,000 pages on the private sector and over 3,000 pages on the public sector. + And now let's say I want to compare data of the public sector onto specific countries. So instead of drilling down, I want to group by this -- a way to group this kind of data using another category and, say, okay, let me group this, for instance, France and Luxembourg. Okay. So I can now see the data on public sector bodies only. So we're comparing sector public grouped by two countries, France and Luxembourg. And in this visualization I can have them both at the same time. + Let me move to another class. Let me see. We can see tags or categories. Here we have, once again, randomized data. Don't forget if I want to see how health how the health sector has been doing over time, I can just, once again, select this on the bar charts. Sorry. And then I can go and do -- and select the timeline option, which will show me for every month that I have collected data on this -- on websites or mobile apps that have been tagged as health, I can see the data here. + Let me go back to tag and show you how I can compare, for instance, data on the health and the media sector. So here I can use the comparison and health, which is the one that I've selected it already. Previously selected. 
And now I can also select media, and the comparison page will show me, okay, the number of pages that have been evaluated and the passed assertions, failed assertions, can't tell assertions, inapplicable, and untested assertions for both sectors side by side so that I can compare them. + Let me just present one more feature. If I go -- this is specific at this level. If I go to the application or website level and I change my visualization from the assertions to the success criteria, now I can have access to the specific tests of each success criterion for one application. So we can see here, for the different applications, the success criteria, and I'm going to select here this application or website, whatever. + And I have this -- it's only in this view that I have this option here, the Details option. So if I go to the Details page, now I have -- okay, for this application or website, I know that it passed this success criterion, and I can see the different tests that tell me that, and I see that it failed this other success criterion. And, once again, I have access to the different tests where it fails. + So we can, in fact, go all the way from a grouping of the data by continent down to specific assertions of one application. So this shows us the range that we can have just by visualizing data that's made available from this -- from these tools. |
+
Slide 3: Thank you | +So let me go back to the slides and just -- I've finished my presentation. + So all of this is open source. You can play with this demo. It's available at www.qualweb.di.fc.ul.pt/placm/test. The source code will really soon be available at this repo. It's not there yet because we're moving this to a dedicated repository for these tools. So you will be able to find it at GitHub.com/carlosapaduarte. That's -- C-A-R-L-O-S-A-P-A-D-U-A-R-T-E /placm. Okay? + And if you have any questions or any problems getting this to run, you can get in touch with me at caduarte@campus.ul.pt. + And thank you. I think I've gone a little bit over my time. Sorry for that, Shadi. |
+
[Shadi speaking] | +No. You're spot on. Thank you, Carlos. + And as I told folks earlier on, this is a bit more of an aspirational part. So just to recap, we looked, you know, in the morning at these many, many small rules, right, that test very, very specific parts. And we looked at so-called implementation reports for these. So how do we collect information from these different rules and the tools and the methodologies that implement them? And then, if we can aggregate that on a, say, monitoring body level or, you know, even more widely, can we collect all this data, all this information? + I understand this seems a bit more futuristic, but, you know, why not think about that. Public sector websites are required to be accessible. The monitoring and the reports are required to happen. If we can provide this open data, we can look more -- and this is not about shaming people. This is about analyzing where there are issues, which particular sectors need more support, need more training and advocacy, and, you know, Member States or countries or even organizations -- it could be on an organization level -- could use that to improve their actions and what they're doing to make sure accessibility is implemented. + So the key word here is EARL, the Evaluation and Report Language. This is a format to write down test results, basically. I'll read out the GitHub URL for that, where you can go and see the syntax for writing such results. So it's github.com/w3c/earl, E A R L, Evaluation and Report Language. + Okay. We had a question for you, Carlos. My master's student is studying the web accessibility of Portuguese public libraries' websites. He has a full list of the public libraries' addresses. Can he import the list into the test data browser and do a summary analysis of all of them? |
+
[Carlos speaking] | +No. No. Because this tool does not do the accessibility evaluation. If you have the accessibility evaluations of all of those libraries in EARL format, as Shadi was just mentioning, yes, then you can import those reports and use this tool to go over them and do whatever kind of analysis you want to do. | +
Date: Updated 9 April 2024.
+Editors: Shawn Lawton Henry and Rémi Bétin.
+Developed with input from the Education and Outreach Working Group (EOWG). Developed with support from the WAI Expanding Access project, funded by the Ford Foundation. Updated as part of the WAI-CooP project, co-funded by the European Commission.
+--- + +{::nomarkdown} +{% include box.html type="start" title="Summary" class="" %} +{:/} + +This page provides general instructions on translating WAI resources. + +For a list of existing translations, see {% include link.html to="/translations/" text="All WAI Translations" %}. + +To get announcements related to WAI translations, subscribe to the WAI Translations mailing list by sending e-mail to public-wai-translations-request@w3.org with subject: “subscribe” + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::options toc_levels="2" /} + +{::nomarkdown} +{% include_cached toc.html type="start" title="Page Contents" %} +{:/} + +- TOC is created automatically. +{:toc} + +{::nomarkdown} +{% include_cached toc.html type="end" %} +{:/} + +Thank you for your interest in translating resources from the World Wide Web Consortium (W3C) Web Accessibility Initiative (WAI). + +## Translation Instructions + +**Scope:** +* These instructions cover web pages with a URI that begins with www.w3.org/WAI +* For web pages that begin with www.w3.org/TR/ or something else, there is a different process that is introduced in [TR & Authorized W3C Translations below](#tr). + +**Translator background** — we prefer translators to be: +* native speakers +* familiar with accessibility terminology in their language +* comfortable editing a file with code + +To avoid overlapping work: +* please do **not** translate files from the web +* follow the [step-by-step guide](/about/translating/resources/) to _get the right file to translate_ and to ensure that the resource is ready for you to translate. + +### If you want to translate a WAI resource: + +- Follow instructions in [[Step-by-Step Guide to Translating WAI Resources]](/about/translating/resources/). + +### If you want to volunteer to review a translation: + +- Follow instructions in [[Reviewing a Translation]](/about/translating/reviewing/). + +We encourage you to keep up on related translations work by [subscribing to the WAI Translations mailing list](mailto:public-wai-translations-request@w3.org?subject=subscribe). + +### Important notes + +#### Translation Agreement + +By submitting a translation, you agree: +* To the redistribution terms of the [W3C Document License](https://www.w3.org/copyright/document-license-2023/). Your translation may be republished by the W3C or other entities if it is done in compliance with the License terms. +* That the W3C may rescind your right to publish or distribute the derivative work if the W3C finds that it leads to confusion regarding the original document's status or integrity. ([Source](https://www.w3.org/copyright/intellectual-rights/#translate).) + +#### Reviews + +Translations will be reviewed before they are published. + +#### Names and Links {#links} + +This policy is based on [Internationalization Links](https://www.w3.org/International/i18n-drafts/pages/translation.html#linkingrules), which provides some background. + +Translations can include: + +* Translator's formal name, common name used online, &/or Twitter handle. + * Link to information about the translator as an individual, such as "about" page on personal website or biography page on a scholarly website. +* Organization name - translator's employer &/or other sponsor/funder of the translation. + +Cannot include: +* Links to organizations. (Exception: Qualifying accessibility/disability organizations or translation organizations. 
To request an exception, e-mail [wai@w3.org with subject [Translations link request]](mailto:wai@w3.org?subject=%5BTranslations%20link%20request%5D).) +* Links to personal home pages rather than "about" pages. + +#### Updating Resources + +When the English version of a resource is updated, we will inform translators what has changed, and request that translators update their translation. If original translators do not respond before we need the update, we will invite others to update the translation. + +In some cases, we will add the updated English to the translation while awaiting an update. If the changes are substantive, the translation may be removed until an updated version is provided. + +#### W3C Translations Information + +More information is available in [W3C Translations](https://www.w3.org/Consortium/Translation/) and in the W3C Intellectual Rights FAQ, particularly under the questions starting with "Can I translate one of your specifications into another language?" + +## WAI Translations Mailing List {#mailinglist} + +* **To subscribe**, send e-mail to [public-wai-translations-request@w3.org with subject: subscribe](mailto:public-wai-translations-request@w3.org?subject=subscribe). +* To unsubscribe, send e-mail to [public-wai-translations-request@w3.org with subject: unsubscribe](mailto:public-wai-translations-request@w3.org?subject=unsubscribe). + +You can see past messages from the [WAI Translations List Archives](https://lists.w3.org/Archives/Public/public-wai-translations/). + +There is also a broader W3C Translators list. To subscribe: [e-mail to w3c-translators-request@w3.org with subject: subscribe](mailto:w3c-translators-request@w3.org?subject=subscribe), archive: [W3C Translators List Archives](https://lists.w3.org/Archives/Public/w3c-translators/). + +## TR & Authorized W3C Translations {#tr} + +Web pages at URIs that begin with www.w3.org/TR/ (for "Technical Report") follow a different process described in [W3C Translations](https://www.w3.org/Consortium/Translation/). + +Most translations are informative and unofficial. In cases where standards translations are meant for official purposes, they may be developed as Authorized W3C Translations according to the **[Policy for Authorized W3C Translations](https://www.w3.org/2005/02/TranslationPolicy.html)**. Generally, only completed W3C Recommendations and Working Group Notes, including the WAI guidelines, are candidates for Authorized W3C Translations. The authorized translations policy is designed to ensure transparency and community accountability in the development of authorized translations under the oversight of W3C. + +* [WCAG 2 Translations](/standards-guidelines/wcag/translations/) lists in-progress and completed translations of Web Content Accessibility Guidelines (WCAG) 2.1 and 2.0, including unofficial translations and Authorized W3C Translations. +* [Authorized Translations of W3C Recommendations](https://www.w3.org/Translations/authorized.html) lists completed Authorized W3C Translations of WCAG 2.1, WCAG 2.0, Authoring Tool Accessibility Guidelines (ATAG) 2.0, and others. diff --git a/pages/about/translating/resources.md b/pages/about/translating/resources.md new file mode 100644 index 00000000000..a5b55ad7fde --- /dev/null +++ b/pages/about/translating/resources.md @@ -0,0 +1,275 @@ +--- +title: "Step-by-Step Guide to Translating WAI Resources" +nav_title: Translating WAI Resources +lang: en +last_updated: 2024-05-27 +description: Help make the Web accessible to people with disabilities around the world.
We appreciate your contributions to translating W3C WAI accessibility resources. + +permalink: /about/translating/resources/ +ref: /about/translating/resources/ +redirect_from: + - /about/translating/step-by-step/ + +image: /content-images/about/social-translations.png +feedbackmail: wai@w3.org + +footer: | +Date: Updated 27 May 2024.
+Editors: Shawn Lawton Henry and Rémi Bétin.
+Developed as part of the WAI-CooP project, co-funded by the European Commission.
+--- + +{::nomarkdown} +{% include box.html type="start" title="Summary" class="" %} +{:/} + +This page provides step-by-step guidance on translating WAI resources. + +For more general information, see [[Translating WAI Resources]](/about/translating/) + +To get announcements related to WAI translations, subscribe to the WAI Translations mailing list by sending e-mail to public-wai-translations-request@w3.org with subject: “subscribe” + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::options toc_levels="2" /} +{::nomarkdown} +{% include_cached toc.html type="start" title="Page Contents" %} +{:/} + +- TOC is created automatically. +{:toc} + +{::nomarkdown} +{% include_cached toc.html type="end" %} +{:/} + +## Overview + +There are 6 steps to contribute as a volunteer translator: + +1. **[Find a resource to translate](#find-resource)** + +2. **[Inform us of your intent to translate a resource](#intent)** + +3. **[Translate the resource](#start-translation)** + +4. **[Preview and check your translation](#preview)** + +5. **[Ask for reviews](#review)** + +6. **[Wait for publication](#publication)** + +Before starting your work, please take the time to read the information in [[Translating WAI Resources]](/about/translating/), and verify that you are willing to contribute under these policies. + +## GitHub + +We encourage you to use GitHub, especially if you intend to become a regular translator. + +Discussions, collaboration with reviewers and progress tracking are more easily managed in GitHub. + +- If you do not already have a GitHub Account, [sign up {% include_cached external.html %}](https://github.com/signup) for an account; +- If you are new to GitHub, we recommend that you follow this interactive free course:\ +[Introduction to GitHub {% include_cached external.html %}](https://github.com/skills/introduction-to-github); +- For more specific guidance during the translation process, you can deep dive with our [[Using Github]](/about/translating/resources/using-github/) guide. + +{::nomarkdown} +{% include box.html type="start" title="Important Note" class="simple" %} +{:/} + +We hope you find benefits in using GitHub to contribute. That said, we do not expect translators to be or become GitHub experts. + +If you are unsure how to proceed and need guidance; or if you are not comfortable with GitHub at all: please send e-mail to [group-wai-translations@w3.org](mailto:group-wai-translations@w3.org) [^1]. We will be happy to guide you through GitHub or look for alternatives more suitable for you. + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{% include excol.html type="all" %} +{% include excol.html type="start" %} + +## Step 1: Find a resource to translate {#find-resource} + +{% include excol.html type="middle" %} + +### Have a look at our priorities + +For suggestions on which to translate first, see [Priorities for Translations](#priorities). + +### Or pick a resource according to your preference + +You are welcome to translate any current WAI resource that you think would be useful in your language. + +The easiest way to find a resource to translate is to head over to the dedicated [translations sitemap for your language](/about/translating/sitemaps/). 
+ +Translations sitemaps display the structure of WAI website, and indicate for each page: +- If a translation in this language has been published, and its current status ({% include_cached icon.html name="check-circle" %} Up-to-date or {% include_cached icon.html name="warning" %} Needs update); +- If a page has no translation available in this language, and therefore welcomes a volunteer translation. + +{% include excol.html type="end" %} + +{% include excol.html type="start" %} + +## Step 2: Inform us of your intent to translate a resource {#intent} + +{% include excol.html type="middle" %} + +{::nomarkdown} +{% include box.html type="start" title="New volunteer?" class="highlighted" %} +{:/} + Thank you so much for your interest in translating W3C WAI Resources! + +To begin your journey, send an email to [group-wai-translations@w3.org](group-wai-translations@w3.org) (not publicly archived [^1]) to express your interest in joining the WAI translator volunteers community. + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +When you have found a resource to translate, you must indicate your interest, and wait for a reply from WAI team. We'll check that the file isn't currently being translated by someone else, and that it isn't about to be updated. + +Please wait for reply from WAI team before starting a translation. + +### Recommended / quickest way {#translation-issue} + +1. From the [translations sitemap](/about/translating/sitemaps/) for your language, click on the "Volunteer to translate this page" link, displayed next to the resource you intend to translate. +2. It will automatically pre-fill a GitHub issue with some useful information. +3. Read and submit the issue. + +### Alternatives: + - [Create an Issue in the dedicated "WAI Translations" repository](https://github.com/w3c/wai-translations/issues/new). + - If you are not comfortable with GitHub, send e-mail to the publicly-archived WAI translations list using [this e-mail template](mailto:public-wai-translations@w3.org?subject=%5Blang%5D%20Intent%20to%20Translate%3A%20%5Btitle%5D&body=I%20would%20like%20to%20translate%20into%20%5Blanguage%5D%20the%20following%20resource%3A%0A%5BEnglish%20title%5D%0A%5BURI%5D%0A%0AI%20have%20read%20the%20information%20on%20Translating%20WAI%20Documents%20at%20https%3A%2F%2Fwww.w3.org%2FWAI%2Fabout%2Ftranslating%2F%0A%0AI%20will%20wait%20for%20confirmation%20that%20the%20resource%20is%20ready%20for%20translation.). + +{% include excol.html type="end" %} + +{% include excol.html type="start" %} + +## Step 3: Start translating {#start-translation} + +{% include excol.html type="middle" %} + +### Initial setup + +1. Log in to your existing GitHub account, or [create one {% include_cached external.html %}](https://github.com/signup). +2. Get to the repository of the resource you will translate. + - Near the bottom of each page on the WAI website, there is a "Help improve this page" box. The middle button is "Fork & Edit in GitHub". That gets you to the repository. + - If you have followed the [recommended way](#translation-issue) to declare your intent, the link to the repository has been automatically added in the GitHub issue description. +3. You can directly work from there. + +For further guidance on using GitHub to translate a WAI resource, follow the [dedicated guide](/about/translating/resources/using-github/). 
+ +### Translate the resource + +- Refer to [[How to Translate a WAI Resource]](/about/translating/resources/technical-steps/) guide + - If the page you are translating has videos, also refer to [[How to Create Translated Video Subtitles and Descriptions]](/about/translating/resources/video-subtitles/) + - If the page you are translating has images with text, also refer to [[How to Translate Images]](/about/translating/resources/video-subtitles/) +- Commit your changes. + +{% include excol.html type="end" %} + +{% include excol.html type="start" %} + +## Step 4: Preview and check your translation {#preview} + +{% include excol.html type="middle" %} + +1. Open a [draft Pull Request {% include_cached external.html %}](https://docs.github.com/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/creating-a-pull-request-from-a-fork) in the GitHub repository of the resource, with `[IN-PROGRESS]` at the beginning of the title. + +2. After you submit the pull request, a Netlify preview will be generated so you can check your file and make edits. + - At first it will say **"👷 Deploy Preview for _wai-repo-name_ processing."**. + - When done, it will say **"✅ Deploy Preview for _wai-repo-name_ ready!"** and a "Deploy Preview" link will appear. + +3. Click on the preview link: + - Check everything listed in [Reviewer Guidance](/about/translating/reviewing/#initial-things-to-check); + - Eventually, commit some fixes; + - At this point, if you encounter some technical problems, ask for help from WAI team. + +4. When your auto-review is done, go to the next step. + +{% include excol.html type="end" %} + +{% include excol.html type="start" %} + +## Step 5: Ask for reviews {#review} + +{% include excol.html type="middle" %} + +Translations are reviewed before they are published. + +1. [Change the state of your pull request to "Ready for review" {% include_cached external.html %}](https://docs.github.com/pull-requests/collaborating-with-pull-requests/proposing-changes-to-your-work-with-pull-requests/changing-the-stage-of-a-pull-request) and change the beginning of your Pull Request title to `[READY FOR REVIEW]` +2. Communicate your Pull Request is ready for review in the [GitHub issue created at step 2](#intent). +3. Review discussions take place directly in the GitHub Pull Request. Work together with reviewer(s) to: + - fix issues; + - accurately convey the meaning from the English version; + - consider different dialects. + +If you have any questions about the wording, please report them in the GitHub issue so that WAI team and other volunteers can help. + +{% include excol.html type="end" %} + +{% include excol.html type="start" %} + +## Step 6: Wait for publication {#publication} + +{% include excol.html type="middle" %} + +When the review is done, WAI team will: +- do some final checks; +- merge the Pull Request; +- publish the translation on WAI website. + +Please note these steps may take some time depending on other ongoing priorities. + +{% include excol.html type="end" %} + +## Priorities for Translating WAI Resources {#priorities} + +{% assign alldocs=site.documents | concat: site.pages %} +{%- if site.data.translations-priorities -%} +Date: Updated 9 April 2024.
+Editor: Rémi Bétin.
+--- + +{::nomarkdown} +{% include box.html type="start" h="2" title="Summary" class="full" %} +{:/} + +This page guides you through the technical steps to translate images from the Web Accessibility Initiative (WAI) website. + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::options toc_levels="2" /} +{::nomarkdown} +{% include_cached toc.html type="start" title="Page Contents" class="simple" %} +{:/} + +- TOC is created automatically. +{:toc} + +{::nomarkdown} +{% include toc.html type="end" %} +{:/} + +## Overview + +There are 3 steps to create translated versions of our resources' images: + +1. **[Translate the SVG source file](#translate-svg)** + +2. **[Export the translated image as a PNG](#export-png)** + +3. **[Commit your changes and open a Pull Request](#commit-changes)** + +## Initial step: Declare your intent + +In accordance with the WAI Translations process, please always [declare your intent](/about/translating/resources/#intent) and wait for reply from WAI team _before_ starting a translation. + +## Step 1: Translate the SVG source file {#translate-svg} + +When an image from our website can be translated, it is usually provided with an SVG source file. + +In general: +- PNG images are located in the `content-images/` folder of the repository +- SVG source files are located in the `content-images/source/` folder of the repository. + +**What you will need:** +- An SVG editor. The following instructions apply to the [Inkscape](https://inkscape.org/) editor, which has been used by some volunteers. +- The ["Noto Sans" font family](https://fonts.google.com/noto/specimen/Noto+Sans), including the _Regular_ and _Bold_ weights, installed on your computer. + +**Translating the SVG source file:** + +{::nomarkdown} +Date: Updated 27 May 2024.
+Editors: Shawn Lawton Henry and Rémi Bétin.
+Developed as part of the WAI-CooP project, co-funded by the European Commission.
+--- + +{::nomarkdown} +{% include box.html type="start" h="2" title="Summary" class="full" %} +{:/} + +This page provides specific translation instructions for some WAI resources. + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::options toc_levels="2" /} +{::nomarkdown} +{% include toc.html type="start" title="Page Contents" %} +{:/} + +- TOC is created automatically. +{:toc} + +{::nomarkdown} +{% include toc.html type="end" %} +{:/} + +{::nomarkdown} +{% include excol.html type="all" %} +{:/} + + +{::nomarkdown} +{% include excol.html type="start" id="video-introduction" %} +{:/} +## Video Introduction to Web Accessibility and W3C Standards +{::nomarkdown} +{% include excol.html type="middle" %} +{:/} + +Check if the [player is available in your translated language {% include_cached external.html %}](https://github.com/ableplayer/ableplayer/blob/master/README.md#user-content-supported-languages). + +Then, translate the ["Translations" section](/videos/standards-and-benefits/#translations) of the page accordingly: +1. **If the player _is_ available in the translated language**: + * Translate the following words from the video player interface: + * "Captions" + * "Show transcript" + * "Language" + * Update the image per the instructions below.\ + _If you are not able to update the image, let us know at [group-wai-translations@w3.org](mailto:group-wai-translations@w3.org)._ + * Make a new image to replace [show-language.png](https://www.w3.org/WAI/content-images/wai-video-standards-and-benefits/show-language.png). The circle is 7px #eed009 / rgb(238,208,9). + * Name it `show-language.[language tag].png`\ + For example: `show-language.fr.png` + * Upload it to the [content-images folder](https://github.com/w3c/wai-video-standards-and-benefits/tree/master/content-images/wai-video-standards-and-benefits) + * In your translation, add the language tag to the image path. +2. **If the player is _not_ available in the translated language**: + * Do not translate the following words from the video player interface; leave them in English: + * "Captions" + * "Show transcript" + * "Language" + * Mark up the English words with the lang attribute:\ + `Captions`. + +{::nomarkdown} +{% include excol.html type="end" %} +{:/} + +{::nomarkdown} +{% include excol.html type="start" id="perspective-videos" %} +{:/} +## Web Accessibility Perspectives Videos +{::nomarkdown} +{% include excol.html type="middle" %} +{:/} + +The ["Web Accessibility Perspective Videos"](/perspective-videos/) page points to the resource sub-pages. Once you have translated a sub-page, add your language tag to the end of the corresponding URL in the index page. + +{::nomarkdown} +{% include box.html type="start" title="Example for a French translation:" %} +{:/} +```markdown +{% raw %}- [![](img/thumbnails/keyboard.jpg)alt
Decision Tree)
+{::nomarkdown}
+{% include excol.html type="middle" %}
+{:/}
+
+Tutorials resources use a dynamic footer, using specific metadata set in the "front matter" of each page.
+
+{::nomarkdown}
+{% include box.html type="start" title="Example:" %}
+{:/}
+```yaml
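+# Translate only the values of editors, update_editors, contributing_participants and support (see the numbered steps after this example).
+# Keep the field names as they are, and leave the Working Group acronym (EOWG) in English.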
+metafooter: true
+editors:
+ - Eric Eggert: "https://www.w3.org/People/yatil/"
+ - Shadi Abou-Zahra: "https://www.w3.org/People/shadi/"
+update_editors:
+ - Brian Elton
+contributing_participants:
+ - see Acknowledgements
+support: Developed by the Education and Outreach Working Group (EOWG). Developed with support from the WAI-ACT project, co-funded by the European Commission IST Programme.
+```
+{::nomarkdown}
+{% include_cached box.html type="end" %}
+{:/}
+
+1. Translate content in `editors`, `update_editors`, `contributing_participants` and `support`.
+2. Translate the Working Group name and leave the Working Group acronym in English.
+3. Add translations for the following terms, used by the footer, in the [translations.yml file {% include_cached external.html %}](https://github.com/w3c/wai-website-data/blob/master/translations.yml) located in [the `wai-website-data` repository {% include_cached external.html %}](https://github.com/w3c/wai-website-data/) (see the sketch after this list):
+- "Editors:"
+- "Update Editor:"
+- "Status"
+- "Updated"
+- "first published"
+
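+The exact schema of `translations.yml` may differ from this rough, hypothetical sketch; mirror the existing entries in that file. An entry generally pairs an English string with its translations, for example:
+
+```yaml
+# Hypothetical entry; check existing entries in translations.yml for the real structure.
+"Editors:":
+  fr: "Rédacteurs :"
+```
+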
+{::nomarkdown}
+{% include excol.html type="end" %}
+{:/}
\ No newline at end of file
diff --git a/pages/about/translating/resources/subtitles.md b/pages/about/translating/resources/subtitles.md
new file mode 100644
index 00000000000..463a200c135
--- /dev/null
+++ b/pages/about/translating/resources/subtitles.md
@@ -0,0 +1,267 @@
+---
+title: "How to Create Translated Video Subtitles and Descriptions"
+nav_title: Subtitles/Descriptions
+lang: en
+last_updated: 2024-04-12
+description: Help make the Web accessible to people with disabilities around the world. We appreciate your contributions to translating W3C WAI accessibility resources.
+
+permalink: /about/translating/resources/subtitles/
+ref: /about/translating/resources/subtitles/
+redirect_from:
+ - /about/translating/guides/video-subtitles/
+
+image: /content-images/about/social-translations.png
+feedbackmail: wai@w3.org
+
+footer: |
+ Date: Updated 12 April 2024.
+Editors: Shawn Lawton Henry and Rémi Bétin.
+Developed as part of the WAI-CooP project, co-funded by the European Commission.
+--- + +{::nomarkdown} +{% include box.html type="start" h="2" title="Summary" class="full" %} +{:/} + +This page guides you through the technical steps to create new subtitles and translated descriptions for Web Accessibility Initiative (WAI) videos. + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::options toc_levels="2" /} +{::nomarkdown} +{% include_cached toc.html type="start" title="Page Contents" class="simple" %} +{:/} + +- TOC is created automatically. +{:toc} + +{::nomarkdown} +{% include toc.html type="end" %} +{:/} + +## Overview + +There are 4 steps to create new subtitles or video descriptions in a new language: + +1. **[Find the related video identifier](#find-video-id)** + +2. **[Edit the video-metadata.yml file](#video-metadata)** + +3. **[Create the subtitles/descriptions file(s)](#create-vtt)** + +4. **[Translate the VTT file(s)](#translate-vtt)** + +5. **[Commit your changes and open a Pull Request](#commit-changes)** + +## Initial step: Declare your intent + +In accordance with the WAI Translations process, please always [declare your intent](/about/translating/resources/#intent) and wait for reply from WAI team _before_ starting a translation. + +## Step 1: Find the related video identifier {#find-video-id} + +If you want to create new subtitles/translated descriptions for a video, you have probably seen the video in a W3C WAI resource. + +You now need to find the video identifier. + +Look for the embedded video player **in the code of the English version of the page**. It looks similar to this: + +{::nomarkdown} +{% include box.html type="start" title='Video player example' %} +{:/} + +```liquid +{% raw %}{% include video-player-data.html + yt-id="20SHvU2PKsM" + video-id="keyboard" +%}{% endraw %} +``` + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +**The value of `video-id` is the video identifier you need.** + + +## Step 2: Edit video-metadata.yml {#video-metadata} + +WAI videos captions, subtitles and descriptions metadata are stored in "video-metadata.yml" in ["wai-website-data" repository {% include_cached external.html %}](https://github.com/w3c/wai-website-data/) + +{::nomarkdown} +{% include box.html type="start" title="Video-metadata.yml example" %} +{:/} +{% include excol.html type="start" id="optional-id" %} + +Show example + +{% include excol.html type="middle" %} + +```yaml +- id: keyboard + name: + en: "Web Accessibility Perspectives: Keyboard Compatibility" + fr: "L'accessibilité Web illustrée : Compatibilité avec le clavier" + main-page: /perspective-videos/keyboard/ + path: perspective-videos + lang-folder: true + captions: + - en + captions-ad: + - en + subtitles: + - fr + - zh-hans + subtitles-ad: + - fr + - zh-hans + descriptions-ad: + - en + - fr + - zh-hans +``` +{% include excol.html type="end" %} +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::nomarkdown} +Date: Updated 27 May 2024.
+Editors: Shawn Lawton Henry and Rémi Bétin.
+Developed as part of the WAI-CooP project, co-funded by the European Commission.
+--- + +{::nomarkdown} +{% include box.html type="start" h="2" title="Summary" class="full" %} +{:/} + +This page guides you through the technical steps to translate Web Accessibility Initiative (WAI) resources, and provides other important guidance. + +For instructions on translating the Web Content Accessibility Guidelines (WCAG), see [[How to Translate WCAG 2]](/about/translating/wcag/). + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::options toc_levels="2" /} +{::nomarkdown} +{% include_cached toc.html type="start" title="Page Contents" class="simple" %} +{:/} + +- TOC is created automatically. +{:toc} + +{::nomarkdown} +{% include toc.html type="end" %} +{:/} + +## Overview + +There are 4 main technical steps to create a new WAI resource translation: + +1. **[Create a new file](#create-file)** + +2. **[Update the "front matter" metadata](#frontmatter)** + +3. **[Translate the main content](#main-content)** + +4. **[Commit your changes and open a Pull Request](#commit-changes)** + +## General guidance + +- **Do not change or adapt or add to the meaning of the English version** in your translation.\ +If you have suggestions for changes to the English version, submit them via GitHub or e-mail using the links in the “Help improve this page” box near the bottom of the page. +- **Before starting, find the relevant ["language tag"](https://www.w3.org/International/questions/qa-choosing-language-tags)** from the [Language Subtag Registry {% include_cached external.html %}](https://www.iana.org/assignments/language-subtag-registry/language-subtag-registry). You will use it many times during the translation. +- The markdown files are very sensitive to indentation, commas, quotes, and special characters. **We recommend that you use a markdown editor or a simple text editor** (including GitHub interface) — and not a document editor like Microsoft Word that often changes quotes and indentation. +- **Some resources have specific instructions.** Please take a look at [[Resource-Specific Translation Instructions]](/about/translating/resources/resource-specific-instructions/) to see if this applies to your targeted resource, and follow these additional instructions if it does. +- If you wish to translate the [WCAG-EM Report Tool](https://www.w3.org/WAI/eval/report-tool/), please read [this specific guidance {% include_cached external.html %}](https://github.com/w3c/wai-wcag-em-report-tool/wiki/How-to-add-a-language), as different steps have to be followed. + +## Initial step: Declare your intent + +In accordance with the WAI Translations process, please always [declare your intent](/about/translating/resources/#intent) and wait for reply from WAI team _before_ starting a translation. + +## Step 1: Create a new file {#create-file} + +Duplicate the file used by the original version, with the language shortcode added to the middle of the filename, as follows: + +{::nomarkdown} +{% include box.html type="start" title="Example" %} +{:/} + +- Original English file: `index.md` +- New Korean file: `index.ko.md` + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +## Step 2: Update the "front matter" metadata {#frontmatter} + +{::nomarkdown} +{% include box.html type="start" class="highlighted" %} +{:/} + +From now on, only edit the newly created translation file. + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +At the top of WAI website files are some metadata, also known as "front matter". + +Your first step into the file is to update this section. 
+ +{::nomarkdown} +{% include box.html type="start" title="Example of front matter (this may differ on your file)" %} +{:/} +```yaml +--- +title: Evaluation Tools Overview +lang: en +last_updated: 2020-04-28 + +github: + repository: w3c/wai-eval-tools-overview + path: "content/index.md" + +permalink: /test-evaluate/tools/ +ref: /test-evaluate/tools/ + +footer: > +Date: Updated 28 April 2020.
+Editor: Shawn Lawton Henry.
+Video developed by the Education and Outreach Working Group (EOWG) with support from the WAI-Guide project funded by the European Commission (EC) under the Horizon 2020 program (Grant Agreement 822245). Acknowledgements.
+--- +``` +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +### 2.1. Update the following front matter values: + +`lang` +- Replace the original value (`en`) with the language shortcode of your translation. + +`last_updated` +- Change `last_updated: 2000-00-00` to the date you finish the translation. + Use the format: YYYY-MM-DD (with month in the middle). + +`path` (below `github`) +- Add the language shortcode at the middle of the filename. + +`permalink` +- Add the language shortcode at the end of the permalink, with no `/` at the end. + +`footer` (not always present) +- If this attribute is present, translate its content. +- Do not change the dates in this section. Those dates should be the same in your translation as in the English version. + +### 2.2. Add translators & contributors names. + +After `last_updated`, add these lines, depending on how many translators there are and if there are contributors. + +Policy for names and links is introduced in [Translating WAI Resources]({{ "/about/translating" | relative_url }}#links). + +{::nomarkdown} +{% include box.html type="start" %} +{:/} +```yaml +translators: + - name: "Your Name" +contributors: + - name: "Other Name" + - name: "Other Name" +``` +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +Or, if the lines are there with "`#`" before them to comment them out: delete the # and the space. + +{::nomarkdown} +{% include box.html type="start" title="Updated front matter for a translation into French" %} +{:/} +{% include excol.html type="start" id="optional-id" %} + +Show example + +{% include excol.html type="middle" %} + +```yaml +--- +title: Evaluation Tools Overview +lang: fr +last_updated: 2023-09-13 + +translators: + - name: "Your Name" +contributors: + - name: "Other Name" + - name: "Other Name" + +github: + repository: w3c/wai-eval-tools-overview + path: "content/index.fr.md" + +permalink: /test-evaluate/tools/fr +ref: /test-evaluate/tools/ + +footer: > +Date : Mise à jour : 28 avril 2020.
+Rédactrice : Shawn Lawton Henry.
+Vidéo créée par le groupe de travail Éducation et Promotion (EOWG) avec le soutien du projet WAI-Guide financé par la Commission européenne (CE) dans le cadre du programme Horizon 2020 (convention de subvention n°822245) Remerciements.
+--- +``` + +{% include excol.html type="end" %} +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +### Follow additional inline instructions + +Many resources have inline instructions in the front matter (after the "`#`" character). + +Please follow these instructions. It will help you know what to translate/update and what to not change. + +## Step 3: Translate main content {#main-content} + +### Markdown/Code + +Please leave the code, HTML, and markdown as is without changing it. + +Make sure to: + +{::nomarkdown} +Date: Updated 9 April 2024.
+Editors: Rémi Bétin and Shawn Lawton Henry.
+Developed as part of the WAI-CooP project, co-funded by the European Commission.
+ +inline_css: | + figure.screenshot { + margin-block-start: 1em; + } + + figure.screenshot img { + box-shadow: 3px 3px 3px #ddd; + } + +--- +{::nomarkdown} +{% include box.html type="start" h="2" title="Summary" class="full" %} +{:/} + +This page guides you through the steps to translate WAI resources using GitHub. + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::options toc_levels="2,3" /} +{::nomarkdown} +{% include toc.html type="start" title="Page Contents" %} +{:/} + +- TOC is created automatically. +{:toc} + +{::nomarkdown} +{% include toc.html type="end" %} +{:/} + +{% include showhidebutton.html showtext="Show all screenshots" hidetext="Hide all screenshots" target=".screenshot" %} + +## Initial step: Declare your intent + +In accordance with the WAI Translations process, please always [declare your intent](/about/translating/resources/#intent) and wait for reply from WAI team _before_ starting a translation. + +## Create the translation file + +Paste the original content that you copied in step 2 into the editor (Ctrl+V or ⌘+V).
+🎉 Congrats! You have now created the translation file to work with.
+In the "Propose changes" modal window that appears, you can rename your commit message to better reflect what you have changed. Then, click on "Propose changes". + {% include showhidebutton.html showtext="Show screenshot" hidetext="Hide screenshot" target=".propose-changes" %}
+ +🎉 Congrats! You have now committed your changes!
+You are now in the "Comparing changes" view, between the "base" repository (the original W3C repository, beginning with w3c) and the "head" repository (your forked repository, beginning with your username).
+Click on "Create pull request", meaning that you propose to apply your changes to the official repository. If the button does not appear, you may need to refresh the page. {% include showhidebutton.html showtext="Show screenshot" hidetext="Hide screenshot" target=".compare-changes" %}
+ +Set the title of the Pull request using the following format: [IN-PROGRESS] Language - Resource Title.
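+For example, a hypothetical title for a French translation: `[IN-PROGRESS] French - Evaluation Tools Overview`.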
+Then, select the drop-down arrow on the "Create pull request" button and choose "Create draft pull request" in the drop-down options. {% include showhidebutton.html showtext="Show screenshot" hidetext="Hide screenshot" target=".create-draft-pull-request" %} +
+ +Click on the "Draft pull request" button to confirm the submission of the pull request. + {% include showhidebutton.html showtext="Show screenshot" hidetext="Hide screenshot" target=".draft-button" %}
+ +🎉 Congrats! Your draft pull request is now created!
+After a few minutes, the notification comment will change to “✅ Deploy Preview for wai-repo-name ready!”.
+To see your preview, click on the "Deploy Preview" link. {% include showhidebutton.html showtext="Show screenshot" hidetext="Hide screenshot" target=".preview-ready" %}
+ +When you have done all your checks and edits, click on the "Edit" button at the top of the Pull request view to change the title prefix "[IN-PROGRESS]" to "[READY FOR REVIEW]".
+Then, click on the "Ready for review" button, at the bottom of the Pull request view. {% include showhidebutton.html showtext="Show screenshot" hidetext="Hide screenshot" target=".ready-for-review-button" %}
+ +🎉 Congratulations, your pull request is now ready for review!
+You can see it is marked as "Open" with a green label at the top of the Pull request view. + {% include showhidebutton.html showtext="Show screenshot" hidetext="Hide screenshot" target=".ready-for-review-state" %}
+ +Date: Updated 7 March 2024.
+Editors: Shawn Lawton Henry and Rémi Bétin.
+Developed as part of the WAI-CooP project, co-funded by the European Commission.
+--- +{::nomarkdown} +{% include box.html type="start" h="2" title="Summary" class="full" %} +{:/} + +This page explains how you can participate in reviewing translations of Web Accessibility Initiative (WAI) resources. + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::options toc_levels="2" /} +{::nomarkdown} +{% include_cached toc.html type="start" title="Page Contents" class="simple" %} +{:/} + +- TOC is created automatically. +{:toc} + +{::nomarkdown} +{% include toc.html type="end" %} +{:/} + +## Overview + +The W3C Web Accessibility Initiative (WAI) welcomes contributions to review volunteer translations, before they are published on the WAI website. + +There are 3 steps to contribute as a reviewer: + +1. **[Find a translation](#find)** ready for review + +2. **[Declare your intent](#volunteer)** to review the translation by a date. + +3. **[Review the translation](#review)** and work together with the translator(s) and other reviewer(s). + +## Step 1: How to find a translation ready for review? {#find} + +You can find translations to review in the [GitHub project {% include_cached external.html %}](https://github.com/orgs/w3c/projects/46/views/2) we use to track translations: +- Filter the view by clicking on your language in the "Language" sidebar. +- Look into the "Review" column of the board: this lists the translations that are ready for review. + +If you have trouble using this _board_ view, you can look at the [equivalent table view {% include_cached external.html %}](https://github.com/orgs/w3c/projects/46/views/3), or you can contact us at [group-wai-translations@w3.org](mailto:group-wai-translations@w3.org)[^1]. + +## Step 2: How to volunteer? {#volunteer} + +### New volunteers: + +Please send an email to [group-wai-translations@w3.org](mailto:group-wai-translations@w3.org)[^1], expressing your interest in joining the WAI translator volunteers community. We will guide you through the next steps. + +### Regular volunteers: + +Indicate that you are volunteering to review the translation in the related GitHub issue. + +**Please always indicate the date by which you plan to review the translation.** This way, we can ask other volunteers if you are unable to complete the review by this date. + +## Step 3: How to review a translation? {#review} + +Review discussions take place directly in the Pull Request(s) opened by the translator. These are listed in the GitHub issue related to the translation. + +We encourage you to respectfully share your comments, suggested changes, spotted issues; and to work together with the translator(s) and other reviewer(s) to improve the translation. + +The most important things for review are: +- **helping accurately convey the meaning from the English version** in the translated language. Translations must not change or adapt or add to the meaning of the English version in their translation. +- **using respectful disability terminology** in the target language and region. + +For that, you will need to read the English version and compare it to the translation. Often, it is best to have them open in side-by-side windows. A preview is included in most Pull Requests, to see the rendered page. + +{::nomarkdown} +{% include box.html type="start" title="Helpful tip" %} +{:/} + +A technique for proof-reading is to listen to the translation being read aloud, for example with a screen reader or text-to-speech in the operating system. 
+ +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +### Specific wording {#specific-wording} +- Check [other translations in your language](/translations) to see how similar words and concepts have been translated. In particular, [Authorized Translations](https://www.w3.org/Translations/authorized.html) have had significant review and input. +- Consider different dialects. Where possible, the translation should use words and phrases that will be best understood across different areas. +- The translator and reviewer(s) might want to work together to consider different options for some wording. +- When you decide on translation of unclear words and phrases that will likely be in other resources, feel free to add them to the [Glossary for your language {% include_cached external.html %}](https://github.com/w3c/translation-glossaries). + +{::nomarkdown} +{% include box.html type="start" title="We are here to help" %} +{:/} + +If you have any questions for us about the wording, you can report them in the GitHub issue or send email to [group-wai-translations@w3.org](mailto:group-wai-translations@w3.org)[^1]. + +We are happy to help you decide on the best translated wording by sharing the considerations and nuances that went into choosing the wording for the English page. + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +### Things to check {#initial-things-to-check} + +#### For all translations + +1. There is no code showing in the rendered page. +2. All of the text that should be translated, is actually translated. +3. The links work. +4. In the raw file, all alternative texts and other not-visible content are translated. + +#### For most resources on WAI website + +{::nomarkdown} +{% assign replacepattern = page.url | relative_url | prepend: "$1|" %}{% include t.html t='This volunteer translation might not accurately reflect the intentions of the English original.' replace=replacepattern %}
+{% include_cached icon.html name="check-circle" %} {% include t.html t='Translation up-to-date with the English version.' %}
+
{% include t.html t='Translation updated:' %} {{page.last_updated | date: "%Y-%m-%d"}}. {% include t.html t='English updated:' %} {{page.last_updated | date: "%Y-%m-%d"}}.
+
+ {%- capture translatorslabel %}{% include t.html t='Translators:' %}{%- endcapture %}
+ {% include peoplelist.html label=translatorslabel people=page.translators %}
+ {%- capture contributorslabel %}{% include t.html t='Contributors:' %}{%- endcapture %}
+ {% include peoplelist.html label=contributorslabel people=page.contributors %}
+ {%- assign replacepattern = "/about/translating/" | relative_url | prepend: "$1|" -%}
+
{% include t.html t='WAI thanks translators, and welcomes other translations.' replace=replacepattern %}
Date: Updated 2 May 2024.
+Editors: Rémi Bétin and Shawn Lawton Henry.
+Developed as part of the WAI-CooP project, co-funded by the European Commission.
+--- + +{::nomarkdown} +{% include box.html type="start" title="Summary" class="" %} +{:/} + +Translations sitemaps help you easily find a WAI resource to translate in your language. + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::options toc_levels="2" /} +{::nomarkdown} +{% include_cached toc.html type="start" title="Page Contents" %} +{:/} + +- TOC is created automatically. +{:toc} + +{::nomarkdown} +{% include_cached toc.html type="end" %} +{:/} + +## Overview + +**A translation sitemap allows volunteer translators to [browse the list of WAI resources](#browse) for which translations are welcome, and [volunteer to translate one](#volunteer).** + +We currently have [{{ site.translations-sitemaps.size }} translations sitemaps](#list). + +_If you are a new volunteer_, please [send us an e-mail to express your interest](/about/translating/resources/#intent) first. + +## How to use a translation sitemap {#how-to-use} + +### Browse the list {#browse} + +Resources are presented in two ways: +- _Priorities_ only lists [priority resources](/about/translating/resources/#priorities) identified by the WAI team. +- _Sitemap_ lists all WAI resources for which translations are welcome, grouped by top-level pages. + +{::nomarkdown} +{% include box.html type="start" title="Note" class="simple" %} +{:/} + +Not all WAI resources are listed in Translations Sitemaps. That may be because a resource will be updated soon, is not intended for translation, or is far from ready to be translated. The list is regularly updated. + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +For each resource listed, you can see: +- The name and URL of the resource in English (in the first column "WAI resource") +- The status of the translation (in the second column "Translation Status") + -{% include_cached icon.html name="check-circle" %} Up-to-date + the title and URL of the translation.
+ -{%- include_cached icon.html name="warning" -%} Translation needs update + the title and URL of the translation. We welcome a volunteer translation update.
+ -{% include_cached icon.html name="ex-circle" %} No translation. We welcome a volunteer translation.
+ +### Volunteer to translate a page or to update a translation {#volunteer} + +If a resource has not been translated in this language, or if a translation needs update, a link is displayed in the "Translation status" column. For example, "Volunteer to translate this page". + +Clicking on this link allows you to declare your intent, by pre-filling a GitHub issue in the [_wai-translations_ repository {% include_cached external.html %}](https://github.com/w3c/wai-translations/). + +**After submitting your intent, please always wait for a reply from WAI team**. We will check that the file is not currently being translated by someone else, and that it is not about to be updated. + +## List of Translations Sitemaps {#list} + +Translations sitemaps are generated for languages with an active volunteer community.\ +If you are an active volunteer and wish to use this feature, feel free to contact us at [group-wai-translations@w3.org](mailto:group-wai-translations@w3.org). + +_Languages are listed alphabetically by their English names._ + +{::nomarkdown} +Date: Updated 9 April 2024.
+Editor: Rémi Bétin.
+--- + +{::nomarkdown} +{% include box.html type="start" h="2" title="Summary" class="full" %} +{:/} + +This page guides you through the technical steps to translate the Web Content Accessibility Guidelines (WCAG) 2.2 and 2.1. + + +For general information on WCAG 2 Translations, see [Contributing to W3C Translations {% include_cached external.html %} +](https://www.w3.org/Consortium/Translation/) and [Policy for Authorized W3C Translations {% include_cached external.html %}](https://www.w3.org/2005/02/TranslationPolicy.html). + + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::nomarkdown} +{% include_cached toc.html type="start" title="Page Contents" class="simple" %} +{:/} + +{::options toc_levels="2" /} + +- This text will be replaced by the TOC. +{:toc} + +{::nomarkdown} +{% include_cached toc.html type="end" %} +{:/} + +## Overview + +Web Content Accessibility Guidelines (WCAG) 2.1/2.2 are currently [available in more than 14 languages](/standards-guidelines/wcag/translations/)! + +There are 5 main steps to create a new translation: +1. **[Get the source files](#get-source-files)** from [`w3c/wcag/` {% include_cached external.html %}](https://github.com/w3c/wcag/) repository +2. **[Translate WCAG source files](#translate-source-files)** +3. **[Export in HTML](#preview-export)** +4. **[Edit the HTML files](#edit-html)** to complete the translation +5. **[Deliver the final files to W3C](#deliver-files)** + +## Step 1: Get the source files {#get-source-files} + +1. Clone [`w3c/wcag` {% include_cached external.html %}](https://github.com/w3c/wcag/) repository (you can [fork {% include_cached external.html %}](https://docs.github.com/en/pull-requests/collaborating-with-pull-requests/working-with-forks/fork-a-repo) it first) +2. Base your translation on the following branch: +- [`main` branch {% include_cached external.html %}](https://github.com/w3c/wcag/tree/main) to translate WCAG 2.2 +- [`WCAG-2.1` branch {% include_cached external.html %}](https://github.com/w3c/wcag/tree/WCAG-2.1) to translate WCAG 2.1 + +## Step 2: Translate WCAG source files {#translate-source-files} + +The repository contains numerous folders and files. Only some of them are relevant to your WCAG translation. + +Translate user-oriented content in: +- `guidelines/index.html` +- All files in `guidelines/sc/` folder and subfolders +- All files in `guidelines/terms/` folder and subfolders +- `guidelines/input-purposes.html` +- `guidelines/relative-luminance.html` +- `guidelines/respect-config.js`: translate Editors list. +- `guidelines/wcag.json` +- All files in `acknowledgements/` folder + +Make sure to follow these translation guidelines: +- Update `lang` and `xml:lang` attributes in `index.html` and `relative-luminance.html`.\ + Use the appropriate "language tag" from the [Language Subtag Registry {% include_cached external.html %}](https://www.iana.org/assignments/language-subtag-registry/language-subtag-registry) +- Do not translate IDs +- Add `hreflang="en"` to links when needed, and `lang="en"` where needed. +- Follow the guidelines of the [W3C Internationalization Activity {% include_cached external.html %}](https://www.w3.org/International/): for example, the proper usage of language tags, encoding declarations, handling bidirectional text, etc. + +## Step 3: Preview your changes and export in HTML {#preview-export} + +To preview your changes, open the `index.html` file in a modern browser, on a local server. Without a server, the success criteria and glossary terms will not be included. 
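+ +For example (one option among many; this assumes Python 3 is installed, and any local static web server works), you can run `python -m http.server` from the root of your clone and open the served guidelines page, such as `http://localhost:8000/guidelines/index.html`, in your browser.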
+ +Allow the script to compile and format the content. + +1. If your document has "ReSpec" errors or warnings, they will show up at the top of the page, next to the "ReSpec Menu" link. +- Check and resolve (potential) "ReSpec Errors". +- Check "ReSpec Warnings". The original document may contain some warnings. Make sure you do not introduce new ones. + +2. Export in HTML using ReSpec: +- Activate the "ReSpec Menu" link in the top right corner. +- Choose "Export...", then the "HTML" option. + +## Step 4: Edit the resulting HTML file and complete the translation {#edit-html} + +Rename the exported HTML file to `index.html` and make the following changes. + +### Add the translation header + +At the very start of your `body` element, add a [translation header, based on the provided boilerplate](https://www.w3.org/2005/02/TranslationPolicy#Disclamier). + +The text in this disclaimer must be in the target language, except for the original title and the reference to the Lead Translation Organization (LTO) at the top. + +### Make some edits + +- Translate the `.head` section +- In each `dfn-panel`, translate “Permalink” and “Referenced in:” +- Next to each Success Criterion, translate “Understanding” and “How to Meet” +- Change the ID of the ["5. Conformance"](https://www.w3.org/TR/WCAG22/#conformance) `Date: Updated 29 May 2024. First published March 2022.
+Editor: Shawn Lawton Henry. Contributors: WAI staff, Working Group Co-Chairs, and others working on accessibility at W3C.
+ +inline_css: | + h1 { + line-height:1; + } +--- + +{::nomarkdown} +{% include box.html type="start" title="Summary" class="" %} +{:/} + +This page highlights some current work at the [W3C](https://www.w3.org/Consortium/) Web Accessibility Initiative ([WAI](https://www.w3.org/WAI/about/)). It points out how you can contribute to making digital technology more accessible to people with disabilities. + +{::nomarkdown} +{% include box.html type="end" %} +{:/} + +{::nomarkdown} +{% include toc.html type="start" title="Page Contents" %} +{:/} + +- This will be replaced by an automatically generated TOC when using Markdown formatting. +{:toc} +{::nomarkdown} +{% include toc.html type="end" %} +{:/} + +## Introduction + +We hope you'll find accessibility work on this page that you are interested in contributing to through W3C WAI. After each item is the acronym of the responsible Working Group (WG) and some Task Forces (TF). Information about the groups is later in this page — first, let's look at some of the work itself. + +## Current Work Highlights + +### Updating Core Guidelines and Other Standards + +* **W3C Accessibility Guidelines (WCAG) 3** is in an exploratory phase, and will change substantially. It's years away from being finalized. Please see information on the latest draft and [review questions](/standards-guidelines/wcag/wcag3-intro/#for-your-review) in [[WCAG 3 Introduction]](/standards-guidelines/wcag/wcag3-intro/) _[AG WG]_ + +* **ARIA 1.3** — We're working on additional support for Braille and other [new features for ARIA 1.3](https://w3c.github.io/aria/#substantive-changes-targeted-for-the-1-3-release). _[ARIA WG]_ + +### Developing New Standards + +* **WAI-Adapt** enables users to adapt (or ‘personalize’) how content is presented to meet their needs and preferences. [[WAI-Adapt Overview]](/adapt/) _[APA WG, WAI-Adapt TF]_ + +* **Spoken Pronunciation** technical standards will enable screen readers and voice assistants to pronounce words correctly. [[Pronunciation Overview]](/pronunciation/) _[APA WG, Spoken Pronunciation TF]_ + +### Documenting Additional Guidance for People with Cognitive Disabilities, Low Vision, and Mobile Devices + +* **Cognitive Accessibility** — We're updating several documents to better address the needs of people with cognitive and learning disabilities. A current topic is mental health. [[Cognitive Accessibility at W3C]](/cognitive/). _[AG WG, COGA TF]_ + + + +* **Mobile Accessibility** work includes supporting on-going standards development, including supporting WCAG 3 exploration. [[Mobile Accessibility at W3C]](/standards-guidelines/mobile/) _[AG WG, Mobile TF]_ + +### Updating Guidance on Applying WCAG to ICT and on EPUB Accessibility + +* **WCAG2ICT** describes how Web Content Accessibility Guidelines (WCAG) 2 can be applied to non-web information and communications technologies (ICT). We are updating it to include WCAG 2.1 and WCAG 2.2. [[WCAG2ICT Overview]](/standards-guidelines/wcag/non-web-ict/) _[AG WG, WCAG2ICT TF]_ + +* **EPUB Fixed Layout Accessibility** aims to help publishers address challenges for people with cognitive disabilities and low vision. [EPUB Fixed Layout Accessibility Editor's Draft](https://w3c.github.io/epub-specs/epub33/fxl-a11y/) _[EPUB3 WG, FXL A11y TF]_ + +### Defining User Requirements (AUR) and Exploring Emerging Technologies + +Documenting the user needs of people with disabilities helps develop standards to meet those needs. 
It also helps designers, developers, and others better understand and meet user needs in order to support accessibility. XR Accessibility User Requirements and more listed at [Digital Accessibility User Requirements](/research/user-requirements/). + +* **Natural Language Interface Accessibility User Requirements ("NAUR")** is a Working Draft. A natural language interface is a user interface in which the user and the system communicate via a natural (human) language. The user provides input as sentences via speech or other input, and the system generates responses as sentences delivered by speech, text, or another modality. [About NAUR](https://www.w3.org/blog/2021/10/natural-language-interface-accessibility-user-requirements-call-for-review/), [NAUR Editor's Draft](https://w3c.github.io/naur/) _[APA WG, Research TF]_ + +* **Collaboration Tools Accessibility User Requirements ("CTAUR")** is a Working Draft. It describes user needs in tools that provide support for one or more specific collaborative features. These features include real-time editing of content by multiple authors, the use of comments or annotations, and revision control. [CTAUR Editor's Draft](https://w3c.github.io/ctaur/) _[APA WG, Research TF]_ + +* **How People with Disabilities Use the Web** helps you understand user needs broadly. We're updating this resource and publishing new videos to go along with it. [[How People with Disabilities Use the Web - previous version]](/people-use-web/), [in-progress update of How People with Disabilities Use the Web](https://deploy-preview-113--wai-people-use-web.netlify.app/people-use-web/) + +### Testing Accessibility, Evaluation Tools + +* **Easy Checks – A First Review of Web Accessibility** helps you start to assess the accessibility of a web page. We are polishing up a new iteration of this popular resource. [Easy Checks - previous version](/test-evaluate/preliminary/), [draft new version of Easy Checks](/test-evaluate/easy-checks/) +* **ACT Rules** (Accessibility Conformance Testing (ACT), also known as "WCAG 2 Test Rules") describe ways to test conformance to WCAG success criteria. They are primarily for developers of evaluation tools and test methodologies. We're documenting more. [[About WCAG 2 Test Rules]](/standards-guidelines/act/rules/about/) _[AG WG, ACT TF]_ +* ACT Rules implementation reports list how test tools or methodologies address test examples in ACT Rules. We invite evaluation tool developers to [submit implementation reports](https://act-rules.github.io/pages/implementations/reporting/). _[AG WG, ACT TF]_ + +### Translating Resources + +Existing translations of W3C accessibility resources are listed in [All WAI Transations](/translations/). We are currently working with volunteer translators to translate more resources and update existing translations, including translations of WCAG and of educational resources. See [Translating WAI resources](/about/translating/). + +## Upcoming Publications + +We plan to publish the following documents in June 2024. + +* More [WAI Translations](/translations/) - usually some each week +* [Guidance on Applying WCAG 2.2 to Non-Web Information and Communications Technologies (WCAG2ICT) Draft](https://www.w3.org/TR/2023/DNOTE-wcag2ict-20230815/) +* [Collaboration Tools Accessibility User Requirements Draft](https://w3c.github.io/ctaur/) +* and some updated educational resources + +Recent publications and other announcements are listed on the **[News page](/news/)**. 
+ +### Get Updated + +To get news via e-mail, LinkedIn, Mastodon, or Atom/RSS feed when these and other accessibility documents are _ready for review_ or _published as final_, see **[[Get WAI News]](/news/subscribe/)**. + +## Wait, Wait, There's More + +This page _only lists about half_ of the active work on accessibility at W3C. Much of the work happens in the accessibility groups that are introduced in the [W3C Working Groups page](https://www.w3.org/groups/wg/#wg-list). + +To learn more about what the Working Groups are working on right now, see: + + +* [APA Current Work](/about/groups/apawg/#current-work) - Accessible Platform Architectures (APA) Working Group +* ARIA - Accessible Rich Internet Applications (ARIA) Working Group + * [ARIA Working Drafts](https://www.w3.org/groups/wg/aria/publications#WD) + * [ARIA Authoring Practices Guide (APG)](/about/groups/task-forces/practices/) +* [AG WG Current Work](/about/groups/agwg/#current-work) - Accessibility Guidelines (AG) Working Group + * [COGA Current Work](https://www.w3.org/WAI/GL/task-forces/coga/wiki/Main_Page) - Cognitive and Learning Disabilities Accessibility Task Force (COGA) + + * [Mobile Current Work](https://www.w3.org/WAI/GL/mobile-a11y-tf/wiki/Main_Page) - Mobile Accessibility Task Force +* [EPUB 3 Working Group document publication status and milestones](https://www.w3.org/publishing/groups/epub-wg/PublStatus) + +### Surprise! We already have a resource for that + +Did you know the WAI website has: **videos, tips, tutorials, tools** (for generating reports and accessibility statements), **translations** (over 35 languages), **training** (a free online course), and so much more. There are [resources for](/roles/) content authors, designers, developers, evaluators, testers, managers, policy makers, trainers, educators, web users, advocates, and people with disabilities. + +**Discover** accessibility resources that _you didn't even know existed_ from the annotated list of **[[WAI Resources]](/resources/).** When you find helpful information, would you **share it with others**. + +## How to Get Involved {#participate} + +To learn about contributing to W3C WAI accessibility work generally, **see [[Participating in WAI]](/about/participating/)**. + +Links to each Working Group's "How to Participate" page are in the [Participating in Working Groups section](/about/participating/#WGs). If you are interested in a Task Force, see participation information for the parent Working Group. + +We also offer ideas for [Promoting and Implementing Web Accessibility](/about/participating/#promoting-and-implementing-web-accessibility). + +**Translations:** If you might want to volunteer to contribute to translations, please see [Translating WAI resources](/about/translating/). You can send questions via e-mail to [group-wai-translations@w3.org](group-wai-translations@w3.org) We are currently seeking translators in all languages and reviewers for draft translations in Czech, Greek, Indonesian and Korean. + +**We look forward to your contributions to making the web more accessible to people with disabilities!** + +### Who is We + +"We" is: + +* [W3C](https://www.w3.org/Consortium/) Web Accessibility Initiative ([WAI](/about/)) +* W3C WAI Team: + * [Shawn Lawton Henry](https://www.w3.org/staff/#shawn) is Web Accessibility Initiative (WAI) Program Lead; Accessibility Education and Communications Lead. 
+ * [Kevin White](https://www.w3.org/staff/#kevin) is Accessibility Technical Lead and supports the Accessibility Guidelines Working Group that develops Web Content Accessibility Guidelines (WCAG). + * [Roy Ruoxi Ran (冉若曦)](https://www.w3.org/staff/#ran) supports accessibility Working Groups and accessibility in China. + * [Daniel Montalvo](https://www.w3.org/staff/#dmontalvo) supports accessibility Working Groups and standards harmonization in Europe. + * [Ken Franqueiro](https://www.w3.org/staff/#kfranqueiro) develops the new technical architecture for WCAG 2, WCAG 3, and the WAI website. +* Participants of: [AG](https://www.w3.org/groups/wg/ag/participants), [APA](https://www.w3.org/groups/wg/apa/participants), [ARIA](https://www.w3.org/groups/wg/aria/participants), [EPUB](https://www.w3.org/groups/wg/epub/participants), [EO](https://www.w3.org/groups/wg/eowg/participants), and other [W3C groups](https://www.w3.org/groups/) + + + +{% include_cached excol.html type="start" id="changelog" %} + +## Updates to this page (changelog) + +{% include_cached excol.html type="middle" %} + +* 29 May 2024 + * Under "Updating Core Guidelines and Other Standards", in the WCAG 3 list item, added: "Please see information on the latest draft and [review questions](/standards-guidelines/wcag/wcag3-intro/#for-your-review) in [[WCAG 3 Introduction]](/standards-guidelines/wcag/wcag3-intro/)..." + * Updated W3C WAI Staff listing - added Ken + * Updated [Upcoming Publications list](/update/#upcoming-publications) +* 30 April 2024 + * Just updated the dates in [Upcoming Publications list](/update/#upcoming-publications) +* 26 March 2024 + * Updated call for volunteer reviewers in [How to Get Involved](/update/#participate). +* 27 February 2024 + * Removed [Digital Publishing WAI-ARIA Module 1.1](https://www.w3.org/TR/dpub-aria-1.1/) and [Digital Publishing Accessibility API Mappings 1.1](https://www.w3.org/TR/dpub-aam-1.1/) from [Upcoming Publications list](/update/#upcoming-publications) since we published them today as "Candidate Recommendations" + * Updated W3C WAI Staff listing + * Swapped order of "Applying WCAG to ICT" and "EPUB Accessibility" +* 14 February 2024 + * Added [Translating Resources section](/update/#translating-resources) and more about translations under [How to Get Involved](/update/#participate). + * Added Easy Checks under the [Testing Accessibility section](/update/#testing-accessibility-evaluation-tools). + * Removed inactive work. +* 1 February 2024 + * Removed ARIA 1.3 from [Upcoming Publications list](/update/#upcoming-publications) since we published it in January +* 22 December 2023 + * Just updated the dates in [Upcoming Publications list](/update/#upcoming-publications) +* 30 November 2023 + * Updated [Upcoming Publications list](/update/#upcoming-publications) + * To [Get Updated](/update/#get-updated) section: deleted Twitter; added Mastodon and LinkedIn + * To [How to Get Involved](/update/#participate) section, added: We welcome translations! If you might want to volunteer to contribute to translations, please see [Translating WAI resources](/about/translating/). + * Updated W3C WAI Staff listing +* 30 October 2023 - no changes +* 5 October 2023 + * Removed WCAG 2.2 from Current Work Highlights and Upcoming Publications, because we published it today.
+* 27 September 2023 + * Updated [Upcoming Publications list](/update/#upcoming-publications) +* 30 August 2023 + * Updated the WCAG 2.2 item under "Current Work Highlights" to change "August 2023" to "2023" + * Removed [Artificial Intelligence (AI) and Accessibility Research Symposium report](/research/ai2023/) from current work and upcoming publications, because we published it today + * Added links to new WCAG2ICT draft + * Updated "Defining User Requirements (AUR) and Exploring Emerging Technologies" section with link to [Digital Accessibility User Requirements](/research/user-requirements/) and information on "Collaboration Tools Accessibility User Requirements" + * Updated [Upcoming Publications list](/update/#upcoming-publications) + * Added W3C WAI staff titles under [Who is We](/update/#who-is-we) +* 31 July 2023 + * Updated W3C WAI staff roles under [Who is We](/update/#who-is-we) + * Updated [Upcoming Publications list](/update/#upcoming-publications). + * Minor update to WCAG 3 current work highlights bullet. +* 30 June 2023 + * Updated the Upcoming Publications list, including adding WCAG2ICT + * Updated the WCAG 2.2 item under "Current Work Highlights" + * Added AI Accessibility Symposium under "Defining User Requirements (AUR) and Emerging Technologies" +* 6 June 2023 — From the Upcoming Publications list, removed [ARIA 1.2](https://www.w3.org/TR/wai-aria-1.2/) because we published it today as a W3C Recommendation, Web Standard. +* 31 May 2023 — Edits and updates in [Specific Invitations](/update/#specific-invitations) +* 30 May 2023 + * Added [Specific Invitations](/update/#specific-invitations) with call for participation in EOWG work and FAST TF. + * Updated [Upcoming Publications list](/update/#upcoming-publications). +* 28 April 2023 + * Updated W3C WAI staff roles under [Who is We](/update/#who-is-we). + * Updated [Upcoming Publications list](/update/#upcoming-publications). + * Minor update to WCAG 2.2 current work highlights bullet. + * Deleted Low Vision Task Force work, since it's on hold for a while. +* 31 March 2023 — Updated [Upcoming Publications list](/update/#upcoming-publications) and minor update to WCAG 2.2 current work highlights bullet. +* 28 February 2023 — No substantive updates. +* 31 January 2023 — Updated [Upcoming Publications list](/update/#upcoming-publications). +* 21 December 2022 + * Updated [Testing Accessibility, Evaluation Tools section](/update/#testing-accessibility-evaluation-tools): updated ACT Rules wording and added invitation for evaluation tool vendors to submit implementation reports. + * Updated [Upcoming Publications list](/update/#upcoming-publications). +* 1 December 2022 — Updated [Upcoming Publications list](/update/#upcoming-publications). +* 9 November 2022 — We're in the process of updating our Mastodon account w3c.social/@wai. +* 1 November 2022 — Updated WCAG 2.2 final publication to early 2023. +* 19 October 2022 — Updated [Upcoming Publications list](/update/#upcoming-publications). +* 29 September 2022 + * Updated [Upcoming Publications list](/update/#upcoming-publications). + * Under Current Work Highlights section, deleted "Teaching Accessibility. Curricula on Web Accessibility...", since we published it today +* 12 September 2022 + * Updated [Upcoming Publications list](/update/#upcoming-publications). + * Updated WCAG 2.2 bullet under Current Work Highlights. + * Deleted "ARIA 1.2..." from Current Work Highlights because we completed documenting implementations and are ready to go from CR to PR.
+ * [GitHub diff 12 Sept](https://github.com/w3c/wai-about-wai/pull/156/files), [rich text diff 12 Sept](https://github.com/w3c/wai-about-wai/pull/156/files?short_path=a0b7030#diff-a0b70304b865682a8845dbdca495ffbbc8198d8e7fc20066ce55181ffe0d461c) +* 31 August 2022 — Updated [Upcoming Publications list](/update/#upcoming-publications). +* 24 August 2022 — Added [ARIA Authoring Practices Guide (APG)](/about/groups/task-forces/practices/). Updated [Upcoming Publications list](/update/#upcoming-publications). [GitHub diff 24 Aug](https://github.com/w3c/wai-about-wai/pull/154/files), [rich text diff 24 Aug](https://github.com/w3c/wai-about-wai/pull/154/files?short_path=a0b7030#diff-a0b70304b865682a8845dbdca495ffbbc8198d8e7fc20066ce55181ffe0d461c) +* 1 August 2022 — Updated [Upcoming Publications list](/update/#upcoming-publications). +* 12 July 2022 — added: **WCAG2ICT** describes how Web Content Accessibility Guidelines (WCAG) 2 can be applied to non-web information and communications technologies (ICT). We are updating it to include WCAG 2.1 and WCAG 2.2. [[WCAG2ICT Overview]](/standards-guidelines/wcag/non-web-ict/) _[AG WG, WCAG2ICT TF]_ +* 30 June 2022 — Updated [Upcoming Publications list](/update/#upcoming-publications). Updated 'Personalization' to 'WAI-Adapt'. +* 5 May 2022 — Just a few little language edits. +* 30 April 2022 — Minor updates shown in [GitHub diff 30 Apr](https://github.com/w3c/wai-about-wai/pull/141/files), [rich text diff 30 Apr](https://github.com/w3c/wai-about-wai/pull/141/files?short_path=a0b7030#diff-a0b70304b865682a8845dbdca495ffbbc8198d8e7fc20066ce55181ffe0d461c) + +{% include_cached excol.html type="end" %} diff --git a/pages/about/using-wai-material.md b/pages/about/using-wai-material.md new file mode 100644 index 00000000000..285cf39f17e --- /dev/null +++ b/pages/about/using-wai-material.md @@ -0,0 +1,111 @@ +--- +title: "Using WAI Material: Permission to Use with Attribution" +title_html: "Using WAI Material:
Date: Updated 1 February 2023.
+Editor: Shawn Lawton Henry.
+Developed with the WAI staff and W3C Communications staff.
+--- + +{::nomarkdown} +{% include box.html type="start" h="2" title="Summary" class="full" %} +{:/} + +You can generally use WAI material for free, with two conditions: +1. Clearly attribute the original source and include a link to it, as specified below, and +2. Not modify the content (with a few exceptions noted below). + +Resources on the WAI website are Copyright World Wide Web Consortium. W3C® liability, trademark, and document use rules apply to all pages, unless otherwise noted as being under a Creative Commons license.
+ +{::nomarkdown} +{% include box.html type="end" %} +{:/} + + +{::options toc_levels="2" /} + +{::nomarkdown} +{% include_cached toc.html type="start" title="Page Contents" %} +{:/} + +- TOC is created automatically. +{:toc} + +{::nomarkdown} +{% include_cached toc.html type="end" %} +{:/} + + +## Introduction + +W3C Web Accessibility Initiative (WAI) develops material to help make the web accessible to people with disabilities. **As with all W3C material, WAI material is generally available for you to use, for free**, with the two conditions below. For example, you can use WAI material by linking to it from your blog, putting it on a USB flash drive and handing it out at a conference, copying it on your intranet, printing it and handing it out to students, including it in a book, etc. + +**There are two conditions for using most WAI material**; you must: + +1. Clearly attribute the original source as specified below, and +2. Not modify the content (with a few exceptions noted below). + +Please read below for more details, including [**some material that is specially-marked content that you can modify**](#cc). + +## Official W3C Document License + +Most WAI material (including the standards and guidelines) is provided under the [W3C Document License](https://www.w3.org/Consortium/Legal/copyright-documents), which grants permission to copy and distribute complete documents in any medium for any purpose, without fee or royalty, provided that you **include URL, copyright, and status** (if it exists). It does not allow modifications or the creation of derivative works, so as to prevent interoperability problems and other issues. For more information, see the [Intellectual Rights FAQ](http://www.w3.org/Consortium/Legal/IPR-FAQ-20000620). + +## Creative Commons Licensed (CC) Material for You to Adapt +{:#cc} + +A few WAI Resources are specifically designed for you to adapt, such as presentation slides. These are provided under a Creative Commons License (CC). We are happy for you to edit, change, and present such CC material *under the following conditions*: + +1. You **include the URI (web address), copyright, and status** (if it exists, e.g., "Draft") of the original WAI material. +2. You **clearly attribute the material** appropriately.