From 4bb540a24e2fe142adf7d0a2b7e7db0a7f9416cc Mon Sep 17 00:00:00 2001 From: jbottesini Date: Sat, 9 Sep 2023 23:39:56 -0300 Subject: [PATCH] Make small aesthethic changes to shiny --- .../execute-results/html.json | 8 +-- chapters/evaluation_data_analysis.qmd | 3 - chapters/evaluation_data_input.qmd | 13 +--- docs/chapters/evaluation_data_input.html | 62 +++++++----------- shinyapp/DataExplorer/app.R | 19 +++--- shinyapp/DataExplorer/shiny_explorer.rds | Bin 20034 -> 77498 bytes 6 files changed, 37 insertions(+), 68 deletions(-) diff --git a/_freeze/chapters/evaluation_data_input/execute-results/html.json b/_freeze/chapters/evaluation_data_input/execute-results/html.json index 92fe285..3696a88 100644 --- a/_freeze/chapters/evaluation_data_input/execute-results/html.json +++ b/_freeze/chapters/evaluation_data_input/execute-results/html.json @@ -1,10 +1,8 @@ { - "hash": "0a4ce8e519048dfd63b05597bb96ddbf", + "hash": "c76fbe826325ec2c8e8ad614a78c1b21", "result": { - "markdown": "# Evaluation data: input/features\n\n\n::: {.cell}\n\n```{.r .cell-code code-summary=\"load packages\"}\nlibrary(tidyverse) \n\n# data acquisition ----\n#devtools::install_github(\"bergant/airtabler\")\nlibrary(airtabler)\n\n# data cleaning & shaping ----\n\n# data analysis ----\n# library(lme4)\n# library(lmtest) # Testing Linear Regression Models\n\n# markdown et al. ----\nlibrary(knitr)\nlibrary(bookdown)\nlibrary(quarto)\nlibrary(formattable) # Create 'Formattable' Data Structures\n\n# others ----\nlibrary(here) # A Simpler Way to Find Your Files\n#devtools::install_github(\"metamelb-repliCATS/aggreCAT\")\n#library(aggrecat)\n\n# Make sure select is always the dplyr version\nselect <- dplyr::select \n\n# options\noptions(knitr.duplicate.label = \"allow\")\n```\n:::\n\n\n\n::: {.callout-note collapse=\"true\"}\n## Note on data input (10-Aug-23)\n\nBelow, the evaluation data is input from an Airtable, which itself was largely hand-input from evaluators' reports. As PubPub builds (target: end of Sept. 2023), this will allow us to include the ratings and predictions as structured data objects. We then plan to access and input this data *directly* from the PubPub (API?) into the present analysis. 
This will improve automation and limit the potential for data entry errors.\n\n::: \n\n\n::: {.cell}\n\n```{.r .cell-code code-summary=\"input from airtable\"}\nbase_id <- \"appbPYEw9nURln7Qg\"\n\n\n# Set your Airtable API key \nSys.setenv(AIRTABLE_API_KEY = Sys.getenv(\"AIRTABLE_API_KEY\"))\n#this should be set in my .Renviron file\n\n# Read data from a specific view\n\nevals <- air_get(base = base_id, \"output_eval\") \n\nall_pub_records <- data.frame()\npub_records <- air_select(base = base_id, table = \"crucial_research\")\n\n# Append the records to the list\nall_pub_records <- bind_rows(all_pub_records, pub_records)\n\n# While the length of the records list is 100 (the maximum), fetch more records\nwhile(nrow(pub_records) == 100) {\n # Get the ID of the last record in the list\n offset <- get_offset(pub_records)\n \n # Fetch the next 100 records, starting after the last ID\n pub_records <- air_select(base = base_id, table = \"crucial_research\", offset = offset)\n \n # Append the records to the df\n all_pub_records <- bind_rows(all_pub_records, pub_records)\n}\n\n# housekeeping\nrm(pub_records)\n```\n:::\n\n::: {.cell}\n\n```{.r .cell-code code-summary=\"just the useful and publish-able data, clean a bit\"}\n# clean evals names to snakecase\ncolnames(evals) <- snakecase::to_snake_case(colnames(evals))\n\nevals_pub <- evals %>% \n dplyr::rename(stage_of_process = stage_of_process_todo_from_crucial_research_2) %>% \n mutate(stage_of_process = unlist(stage_of_process)) %>% \n dplyr::filter(stage_of_process == \"published\") %>% \n select(id, \n crucial_research, \n paper_abbrev, \n evaluator_name, \n category, \n source_main, \n author_agreement, \n overall, \n lb_overall, \n ub_overall, \n conf_index_overall, \n advancing_knowledge_and_practice, \n lb_advancing_knowledge_and_practice, \n ub_advancing_knowledge_and_practice, \n conf_index_advancing_knowledge_and_practice,\n methods_justification_reasonableness_validity_robustness,\n lb_methods_justification_reasonableness_validity_robustness,\n ub_methods_justification_reasonableness_validity_robustness,\n conf_index_methods_justification_reasonableness_validity_robustness, \n logic_communication, lb_logic_communication, ub_logic_communication, \n conf_index_logic_communication,\n engaging_with_real_world_impact_quantification_practice_realism_and_relevance,\n lb_engaging_with_real_world_impact_quantification_practice_realism_and_relevance,\n ub_engaging_with_real_world_impact_quantification_practice_realism_and_relevance,\n conf_index_engaging_with_real_world_impact_quantification_practice_realism_and_relevance,\n relevance_to_global_priorities, \n lb_relevance_to_global_priorities, \n ub_relevance_to_global_priorities, \n conf_index_relevance_to_global_priorities, \n journal_quality_predict, \n lb_journal_quality_predict, \n ub_journal_quality_predict,\n conf_index_journal_quality_predict, \n open_collaborative_replicable, \n conf_index_open_collaborative_replicable, \n lb_open_collaborative_replicable, \n ub_open_collaborative_replicable, \n merits_journal, \n lb_merits_journal, \n ub_merits_journal, \n conf_index_merits_journal)\n\n# shorten names (before you expand into columns)\nnew_names <- c(\n \"eval_name\" = \"evaluator_name\",\n \"cat\" = \"category\",\n \"crucial_rsx\" = \"crucial_research\",\n \"conf_overall\" = \"conf_index_overall\",\n \"adv_knowledge\" = \"advancing_knowledge_and_practice\",\n \"lb_adv_knowledge\" = \"lb_advancing_knowledge_and_practice\",\n \"ub_adv_knowledge\" = 
\"ub_advancing_knowledge_and_practice\",\n \"conf_adv_knowledge\" = \"conf_index_advancing_knowledge_and_practice\",\n \"methods\" = \"methods_justification_reasonableness_validity_robustness\",\n \"lb_methods\" = \"lb_methods_justification_reasonableness_validity_robustness\",\n \"ub_methods\" = \"ub_methods_justification_reasonableness_validity_robustness\",\n \"conf_methods\" = \"conf_index_methods_justification_reasonableness_validity_robustness\",\n \"logic_comms\" = \"logic_communication\",\n \"lb_logic_comms\" = \"lb_logic_communication\",\n \"ub_logic_comms\" = \"ub_logic_communication\",\n \"conf_logic_comms\" = \"conf_index_logic_communication\",\n \"real_world\" = \"engaging_with_real_world_impact_quantification_practice_realism_and_relevance\",\n \"lb_real_world\" = \"lb_engaging_with_real_world_impact_quantification_practice_realism_and_relevance\",\n \"ub_real_world\" = \"ub_engaging_with_real_world_impact_quantification_practice_realism_and_relevance\",\n \"conf_real_world\" = \"conf_index_engaging_with_real_world_impact_quantification_practice_realism_and_relevance\",\n \"gp_relevance\" = \"relevance_to_global_priorities\",\n \"lb_gp_relevance\" = \"lb_relevance_to_global_priorities\",\n \"ub_gp_relevance\" = \"ub_relevance_to_global_priorities\",\n \"conf_gp_relevance\" = \"conf_index_relevance_to_global_priorities\",\n \"journal_predict\" = \"journal_quality_predict\",\n \"lb_journal_predict\" = \"lb_journal_quality_predict\",\n \"ub_journal_predict\" = \"ub_journal_quality_predict\",\n \"conf_journal_predict\" = \"conf_index_journal_quality_predict\",\n \"open_sci\" = \"open_collaborative_replicable\",\n \"conf_open_sci\" = \"conf_index_open_collaborative_replicable\",\n \"lb_open_sci\" = \"lb_open_collaborative_replicable\",\n \"ub_open_sci\" = \"ub_open_collaborative_replicable\",\n \"conf_merits_journal\" = \"conf_index_merits_journal\"\n)\n\nevals_pub <- evals_pub %>%\n rename(!!!new_names)\n\n# Create a list of labels with the old, longer names\nlabels <- str_replace_all(new_names, \"_\", \" \") %>% str_to_title()\n\n# Assign labels to the dataframe / tibble\n# (maybe this can be done as an attribute, not currently working)\n# for(i in seq_along(labels)) {\n# col_name <- new_names[names(new_names)[i]]\n# label <- labels[i]\n# attr(evals_pub[[col_name]], \"label\") <- label\n# }\n\n\n# expand categories into columns, unlist everything\nevals_pub %<>%\n tidyr::unnest_wider(cat, names_sep = \"_\") %>% # give each of these its own col\n mutate(across(everything(), unlist)) # maybe check why some of these are lists in the first place\n \n\n# clean the Anonymous names\nevals_pub$eval_name <- ifelse(\n grepl(\"^\\\\b\\\\w+\\\\b$|\\\\bAnonymous\\\\b\", evals_pub$eval_name),\n paste0(\"Anonymous_\", seq_along(evals_pub$eval_name)),\n evals_pub$eval_name\n)\n\n#housekeeping\nrm(evals)\n\n#Todo -- check the unlist is not propagating the entry\n#Note: category, topic_subfield, and source have multiple meaningful categories. 
These will need care \n```\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\nevals_pub_long <- evals_pub %>% \n pivot_longer(cols = -c(id, crucial_rsx, paper_abbrev, eval_name, \n cat_1,cat_2, cat_3,source_main,author_agreement),\n names_pattern = \"(lb_|ub_|conf_)?(.+)\",\n names_to = c(\"value_type\", \"rating_type\")) %>% # one line per rating type\n mutate(value_type = if_else(value_type == \"\", \"est_\", value_type)) %>% #add main rating id\n pivot_wider(names_from = value_type, \n values_from = value)\n```\n:::\n\n\n\n\n\n### Reconcile uncertainty ratings and CIs {-}\n\nWhere people gave only confidence level 'dots', we impute CIs (confidence/credible intervals). We follow the correspondence described [here](https://effective-giving-marketing.gitbook.io/unjournal-x-ea-and-global-priorities-research/policies-projects-evaluation-workflow/evaluation/guidelines-for-evaluators#1-5-dots-explanation-and-relation-to-cis). (Otherwise, where they gave actual CIs, we use these.)^[Note this is only a first-pass; a more sophisticated approach may be warranted in future.]\n\n::: {.callout-note collapse=\"true\"}\n## Dots to interval choices\n\n> 5 = Extremely confident, i.e., 90% confidence interval spans +/- 4 points or less)\n\nFor 0-100 ratings, code the LB as $max(R - 4,0)$ and the UB as $min(R + 4,100)$, where R is the stated (middle) rating. This 'scales' the CI, as interpreted, to be proportional to the rating, with a maximum 'interval' of about 8, with the rating is about 96.\n\n> 4 = Very*confident: 90% confidence interval +/- 8 points or less\n\nFor 0-100 ratings, code the LB as $max(R - 8,0)$ and the UB as $min(R + 8,100)$, where R is the stated (middle) rating. \n\n> 3 = Somewhat** confident: 90% confidence interval +/- 15 points or less \n\n> 2 = Not very** confident: 90% confidence interval, +/- 25 points or less\n\nComparable scaling for the 2-3 ratings as for the 4 and 5 rating.\n\n> 1 = Not** confident: (90% confidence interval +/- more than 25 points)\n \nCode LB as $max(R - 37.5,0)$ and the UB as $min(R + 37.5,100)$. \n \nThis is just a first-pass. There might be a more information-theoretic way of doing this. On the other hand, we might be switching the evaluations to use a different tool soon, perhaps getting rid of the 1-5 confidence ratings altogether.\n\n::: \n\n\n\n::: {.cell}\n\n```{.r .cell-code code-summary=\"reconcile explicit bounds and stated confidence level\"}\n# Define the baseline widths for each confidence rating\n# JB: it would be good to have some more backing of whether this\n# is a valid way to translate these confidence levels into CIs\n\n# baseline_widths <- c(4, 8, 15, 25, 37.5)\n\n# Lists of categories\nrating_cats <- c(\"overall\", \"adv_knowledge\", \"methods\", \"logic_comms\", \"real_world\", \"gp_relevance\", \"open_sci\")\n\n#... 'predictions' are currently 1-5 (0-5?)\npred_cats <- c(\"journal_predict\", \"merits_journal\")\n\n#DR: Note that I use these objects in future chapters, but they are not connected to the data frame. Either one saves and reinputs the whole environment (messy, replicability issue), or you save this somewhere and re-input it or connect it to the data frame that gets saved (not sure how to do it), or you hard-code reinput it in the next chapter. \n\n#I do the latter for now, but I'm not happy about it, because the idea is 'input definitions in a single place to use later'\n\n#JB: I don't really understand why not just hard code it in the next chapter. These are very short strings. Do you expect them to change often? 
If so, we can derive them from a dataframe somewhere in the future or save as a separate object. \n\n\n# JB: Rewritten functions for adding imputed ub and lb\n\n# calculate the lower and upper bounds, \n# rating: given a rating, \n# conf: a confidence (1-5) score,\n# type: a bound type (lower, upper),\n# scale: a scale (100 is 0-100, 5 is 1-5)\n# This function is not vectorized\ncalc_bounds <- function(rating, conf, type, scale) {\n \n if(scale == 5){ #for the 'journal tier prediction case'\n baseline_width = case_match(conf, \n 5 ~ .2, #4*5/100\n 4 ~ .4, #8*5/100\n 3 ~ .75, #15*5/100\n 2 ~ 1.25, #25*5/100\n 1 ~ 1.875, #37.5*5/100\n .default = NA_real_) \n\n upper = min(rating + baseline_width, 5)\n lower = max(rating - baseline_width, 1)\n }#/if(scale == 5)\n \n if(scale == 100){ #for the 'ratings case'\n \n baseline_width = case_match(conf, \n 5 ~ 4, \n 4 ~ 8, \n 3 ~ 15, \n 2 ~ 25, \n 1 ~ 37.5,\n .default = NA_real_)\n \n upper = min(rating + baseline_width, 100)\n lower = max(rating - baseline_width, 0)\n } #/if(scale == 100)\n \n if(type == \"lower\") return(lower)\n if(type == \"upper\") return(upper)\n}\n\n\n# calculate or find correct lower or upper bound\n# based on rating type, and lb, ub, and conf values\nimpute_bounds <- function(var_name, est, lb, ub, conf, bound_type) {\n \n # get scale of each variable\n scale = if_else(var_name %in% c(\"journal_predict\", \"merits_journal\"), # if variable is a prediction\n 5, 100) #scale is 5, else scale is 100\n # if calculating lower bound variable\n if(bound_type == \"lower\") { #we are calculating a lower bound imputation\n # \n calculated_bound = map_dbl(.x = est, .f = calc_bounds, conf = conf, type = bound_type, scale = scale)\n \n imp_bound = if_else(is.na(lb), calculated_bound, lb)\n }\n \n # if calculating upper bound variable\n if(bound_type == \"upper\") { #we are calculating an upper bound imputation\n # \n calculated_bound = map_dbl(.x = est, .f = calc_bounds, conf = conf, type = bound_type, scale = scale)\n imp_bound = if_else(is.na(ub), calculated_bound, ub)\n }\n \n return(imp_bound)\n}\n\n# apply functions to evals_pub_long\n# where each row is one type of rating\n# so each evaluation is 9 rows long\nevals_pub_long <- evals_pub_long %>% \n rowwise() %>% # apply function to each row\n mutate(lb_imp_ = impute_bounds(var_name = rating_type,\n est = est_,\n lb = lb_, ub = ub_, conf = conf_,\n bound_type = \"lower\")) %>% \n mutate(ub_imp_ = impute_bounds(var_name = rating_type,\n est = est_,\n lb = lb_, ub = ub_, conf = conf_,\n bound_type = \"upper\"))\n\n# Reshape evals_pub_long into evals_pub to add imputed bounds\nevals_pub <- evals_pub_long %>% \n pivot_wider(names_from = rating_type, # take the dataframe back to old format\n values_from = c(est_, ub_, lb_, conf_, lb_imp_, ub_imp_),\n names_sep = \"\") %>% \n dplyr::rename_with(.cols = matches(\"^[ul]b_imp\"),\n .fn = gsub,\n pattern = \"(ub_imp|lb_imp)_(.+)\", \n replacement = \"\\\\2_\\\\1\") %>% \n dplyr::rename_with(.cols = starts_with(\"est_\"),\n .fn = gsub,\n pattern = \"est_(.+)\",\n replacement = \"\\\\1\")\n\n# Clean evals_pub_long names (remove _ at end)\nevals_pub_long <- evals_pub_long %>% \n rename_with(.cols = ends_with(\"_\"),\n .fn = str_remove,\n pattern = \"_$\")\n```\n:::\n\n\n\nWe cannot publicly share the 'papers under consideration', but we can share some of the statistics on these papers. 
Let's generate an ID (or later, salted hash) for each such paper, and keep only the shareable features of interest\n\n\n::: {.cell}\n\n```{.r .cell-code}\nall_papers_p <- all_pub_records %>% \n dplyr::select(\n id,\n category,\n cfdc_DR,\n 'confidence -- user entered',\n cfdc_assessor,\n avg_cfdc,\n category,\n cause_cat_1_text,\n cause_cat_2_text,\n topic_subfield_text,\n eval_manager_text,\n 'publication status',\n 'Contacted author?',\n 'stage of process/todo',\n 'source_main', \n 'author permission?',\n'Direct Kotahi Prize Submission?',\n 'createdTime' \n )\n```\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create a dataset to be used as the input to a shiny app\n\n#| include: false\n#| \npaper_ratings <- evals_pub %>% \n select(paper_abbrev, eval_name, one_of(rating_cats), one_of(pred_cats), ends_with(\"_imp\")) %>% # rating vars\n dplyr::rename_with(.cols = c(one_of(rating_cats), one_of(pred_cats)),\n .fn = gsub,\n pattern = \"(.+)\", \n replacement = \"\\\\1_best\") %>% \n pivot_longer(cols = -c(paper_abbrev, eval_name),\n names_pattern = \"(overall|adv_knowledge|methods|logic_comms|real_world|gp_relevance|open_sci|journal_predict|merits_journal)_(.+)\",\n names_to = c(\"rating_type\", \".value\"))# one line per rating type\n\nwrite_rds(paper_ratings, file = here(\"shinyapp/DataExplorer\", \"shiny_explorer.rds\"))\n```\n:::\n\n::: {.cell}\n\n```{.r .cell-code code-summary=\"save data for others' use\"}\nall_papers_p %>% saveRDS(file = here(\"data\", \"all_papers_p.Rdata\"))\nall_papers_p %>% write_csv(file = here(\"data\", \"all_papers_p.csv\"))\n\nevals_pub %>% saveRDS(file = here(\"data\", \"evals.Rdata\"))\nevals_pub %>% write_csv(file = here(\"data\", \"evals.csv\"))\n\nevals_pub_long %>% write_rds(file = here(\"data\", \"evals_long.rds\"))\nevals_pub_long %>% write_csv(file = here(\"data\", \"evals_long.csv\"))\n\n#evals_pub %>% readRDS(file = here(\"data\", \"evals.Rdata\"))\n```\n:::\n", - "supporting": [ - "evaluation_data_input_files" - ], + "markdown": "# Evaluation data: input/features\n\n\n::: {.cell}\n\n```{.r .cell-code code-summary=\"load packages\"}\nlibrary(tidyverse) \n\n# data acquisition ----\n#devtools::install_github(\"bergant/airtabler\")\nlibrary(airtabler)\n\n# data cleaning & shaping ----\n\n# data analysis ----\n# library(lme4)\n# library(lmtest) # Testing Linear Regression Models\n\n# markdown et al. ----\nlibrary(knitr)\nlibrary(bookdown)\nlibrary(quarto)\nlibrary(formattable) # Create 'Formattable' Data Structures\n\n# others ----\nlibrary(here) # A Simpler Way to Find Your Files\n#devtools::install_github(\"metamelb-repliCATS/aggreCAT\")\n#library(aggrecat)\n\n# Make sure select is always the dplyr version\nselect <- dplyr::select \n\n# options\noptions(knitr.duplicate.label = \"allow\")\n```\n:::\n\n\n\n::: {.callout-note collapse=\"true\"}\n## Note on data input (10-Aug-23)\n\nBelow, the evaluation data is input from an Airtable, which itself was largely hand-input from evaluators' reports. As PubPub builds (target: end of Sept. 2023), this will allow us to include the ratings and predictions as structured data objects. We then plan to access and input this data *directly* from the PubPub (API?) into the present analysis. 
This will improve automation and limit the potential for data entry errors.\n\n::: \n\n\n::: {.cell}\n\n```{.r .cell-code code-summary=\"input from airtable\"}\nbase_id <- \"appbPYEw9nURln7Qg\"\n\n\n# Set your Airtable API key \nSys.setenv(AIRTABLE_API_KEY = Sys.getenv(\"AIRTABLE_API_KEY\"))\n#this should be set in my .Renviron file\n\n# Read data from a specific view\n\nevals <- air_get(base = base_id, \"output_eval\") \n\nall_pub_records <- data.frame()\npub_records <- air_select(base = base_id, table = \"crucial_research\")\n\n# Append the records to the list\nall_pub_records <- bind_rows(all_pub_records, pub_records)\n\n# While the length of the records list is 100 (the maximum), fetch more records\nwhile(nrow(pub_records) == 100) {\n # Get the ID of the last record in the list\n offset <- get_offset(pub_records)\n \n # Fetch the next 100 records, starting after the last ID\n pub_records <- air_select(base = base_id, table = \"crucial_research\", offset = offset)\n \n # Append the records to the df\n all_pub_records <- bind_rows(all_pub_records, pub_records)\n}\n\n# housekeeping\nrm(pub_records)\n```\n:::\n\n::: {.cell}\n\n```{.r .cell-code code-summary=\"just the useful and publish-able data, clean a bit\"}\n# clean evals names to snakecase\ncolnames(evals) <- snakecase::to_snake_case(colnames(evals))\n\nevals_pub <- evals %>% \n dplyr::rename(stage_of_process = stage_of_process_todo_from_crucial_research_2) %>% \n mutate(stage_of_process = unlist(stage_of_process)) %>% \n dplyr::filter(stage_of_process == \"published\") %>% \n select(id, \n crucial_research, \n paper_abbrev, \n evaluator_name, \n category, \n source_main, \n author_agreement, \n overall, \n lb_overall, \n ub_overall, \n conf_index_overall, \n advancing_knowledge_and_practice, \n lb_advancing_knowledge_and_practice, \n ub_advancing_knowledge_and_practice, \n conf_index_advancing_knowledge_and_practice,\n methods_justification_reasonableness_validity_robustness,\n lb_methods_justification_reasonableness_validity_robustness,\n ub_methods_justification_reasonableness_validity_robustness,\n conf_index_methods_justification_reasonableness_validity_robustness, \n logic_communication, lb_logic_communication, ub_logic_communication, \n conf_index_logic_communication,\n engaging_with_real_world_impact_quantification_practice_realism_and_relevance,\n lb_engaging_with_real_world_impact_quantification_practice_realism_and_relevance,\n ub_engaging_with_real_world_impact_quantification_practice_realism_and_relevance,\n conf_index_engaging_with_real_world_impact_quantification_practice_realism_and_relevance,\n relevance_to_global_priorities, \n lb_relevance_to_global_priorities, \n ub_relevance_to_global_priorities, \n conf_index_relevance_to_global_priorities, \n journal_quality_predict, \n lb_journal_quality_predict, \n ub_journal_quality_predict,\n conf_index_journal_quality_predict, \n open_collaborative_replicable, \n conf_index_open_collaborative_replicable, \n lb_open_collaborative_replicable, \n ub_open_collaborative_replicable, \n merits_journal, \n lb_merits_journal, \n ub_merits_journal, \n conf_index_merits_journal)\n\n# shorten names (before you expand into columns)\nnew_names <- c(\n \"eval_name\" = \"evaluator_name\",\n \"cat\" = \"category\",\n \"crucial_rsx\" = \"crucial_research\",\n \"conf_overall\" = \"conf_index_overall\",\n \"adv_knowledge\" = \"advancing_knowledge_and_practice\",\n \"lb_adv_knowledge\" = \"lb_advancing_knowledge_and_practice\",\n \"ub_adv_knowledge\" = 
\"ub_advancing_knowledge_and_practice\",\n \"conf_adv_knowledge\" = \"conf_index_advancing_knowledge_and_practice\",\n \"methods\" = \"methods_justification_reasonableness_validity_robustness\",\n \"lb_methods\" = \"lb_methods_justification_reasonableness_validity_robustness\",\n \"ub_methods\" = \"ub_methods_justification_reasonableness_validity_robustness\",\n \"conf_methods\" = \"conf_index_methods_justification_reasonableness_validity_robustness\",\n \"logic_comms\" = \"logic_communication\",\n \"lb_logic_comms\" = \"lb_logic_communication\",\n \"ub_logic_comms\" = \"ub_logic_communication\",\n \"conf_logic_comms\" = \"conf_index_logic_communication\",\n \"real_world\" = \"engaging_with_real_world_impact_quantification_practice_realism_and_relevance\",\n \"lb_real_world\" = \"lb_engaging_with_real_world_impact_quantification_practice_realism_and_relevance\",\n \"ub_real_world\" = \"ub_engaging_with_real_world_impact_quantification_practice_realism_and_relevance\",\n \"conf_real_world\" = \"conf_index_engaging_with_real_world_impact_quantification_practice_realism_and_relevance\",\n \"gp_relevance\" = \"relevance_to_global_priorities\",\n \"lb_gp_relevance\" = \"lb_relevance_to_global_priorities\",\n \"ub_gp_relevance\" = \"ub_relevance_to_global_priorities\",\n \"conf_gp_relevance\" = \"conf_index_relevance_to_global_priorities\",\n \"journal_predict\" = \"journal_quality_predict\",\n \"lb_journal_predict\" = \"lb_journal_quality_predict\",\n \"ub_journal_predict\" = \"ub_journal_quality_predict\",\n \"conf_journal_predict\" = \"conf_index_journal_quality_predict\",\n \"open_sci\" = \"open_collaborative_replicable\",\n \"conf_open_sci\" = \"conf_index_open_collaborative_replicable\",\n \"lb_open_sci\" = \"lb_open_collaborative_replicable\",\n \"ub_open_sci\" = \"ub_open_collaborative_replicable\",\n \"conf_merits_journal\" = \"conf_index_merits_journal\"\n)\n\nevals_pub <- evals_pub %>%\n rename(!!!new_names)\n\n# Create a list of labels with the old, longer names\nlabels <- str_replace_all(new_names, \"_\", \" \") %>% str_to_title()\n\n# Assign labels to the dataframe / tibble\n# (maybe this can be done as an attribute, not currently working)\n# for(i in seq_along(labels)) {\n# col_name <- new_names[names(new_names)[i]]\n# label <- labels[i]\n# attr(evals_pub[[col_name]], \"label\") <- label\n# }\n\n\n# expand categories into columns, unlist everything\nevals_pub %<>%\n tidyr::unnest_wider(cat, names_sep = \"_\") %>% # give each of these its own col\n mutate(across(everything(), unlist)) # maybe check why some of these are lists in the first place\n \n\n# clean the Anonymous names\nevals_pub$eval_name <- ifelse(\n grepl(\"^\\\\b\\\\w+\\\\b$|\\\\bAnonymous\\\\b\", evals_pub$eval_name),\n paste0(\"Anonymous_\", seq_along(evals_pub$eval_name)),\n evals_pub$eval_name\n)\n\n#housekeeping\nrm(evals)\n\n#Todo -- check the unlist is not propagating the entry\n#Note: category, topic_subfield, and source have multiple meaningful categories. 
These will need care \n```\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\nevals_pub_long <- evals_pub %>% \n pivot_longer(cols = -c(id, crucial_rsx, paper_abbrev, eval_name, \n cat_1,cat_2, cat_3,source_main,author_agreement),\n names_pattern = \"(lb_|ub_|conf_)?(.+)\",\n names_to = c(\"value_type\", \"rating_type\")) %>% # one line per rating type\n mutate(value_type = if_else(value_type == \"\", \"est_\", value_type)) %>% #add main rating id\n pivot_wider(names_from = value_type, \n values_from = value)\n```\n:::\n\n\n\n\n\n### Reconcile uncertainty ratings and CIs {-}\n\nWhere people gave only confidence level 'dots', we impute CIs (confidence/credible intervals). We follow the correspondence described [here](https://effective-giving-marketing.gitbook.io/unjournal-x-ea-and-global-priorities-research/policies-projects-evaluation-workflow/evaluation/guidelines-for-evaluators#1-5-dots-explanation-and-relation-to-cis). (Otherwise, where they gave actual CIs, we use these.)^[Note this is only a first-pass; a more sophisticated approach may be warranted in future.]\n\n::: {.callout-note collapse=\"true\"}\n## Dots to interval choices\n\n> 5 = Extremely confident, i.e., 90% confidence interval spans +/- 4 points or less)\n\nFor 0-100 ratings, code the LB as $max(R - 4,0)$ and the UB as $min(R + 4,100)$, where R is the stated (middle) rating. This 'scales' the CI, as interpreted, to be proportional to the rating, with a maximum 'interval' of about 8, with the rating is about 96.\n\n> 4 = Very*confident: 90% confidence interval +/- 8 points or less\n\nFor 0-100 ratings, code the LB as $max(R - 8,0)$ and the UB as $min(R + 8,100)$, where R is the stated (middle) rating. \n\n> 3 = Somewhat** confident: 90% confidence interval +/- 15 points or less \n\n> 2 = Not very** confident: 90% confidence interval, +/- 25 points or less\n\nComparable scaling for the 2-3 ratings as for the 4 and 5 rating.\n\n> 1 = Not** confident: (90% confidence interval +/- more than 25 points)\n \nCode LB as $max(R - 37.5,0)$ and the UB as $min(R + 37.5,100)$. \n \nThis is just a first-pass. There might be a more information-theoretic way of doing this. On the other hand, we might be switching the evaluations to use a different tool soon, perhaps getting rid of the 1-5 confidence ratings altogether.\n\n::: \n\n\n\n::: {.cell}\n\n```{.r .cell-code code-summary=\"reconcile explicit bounds and stated confidence level\"}\n# Define the baseline widths for each confidence rating\n# JB: it would be good to have some more backing of whether this\n# is a valid way to translate these confidence levels into CIs\n\n# baseline_widths <- c(4, 8, 15, 25, 37.5)\n\n# Lists of categories\nrating_cats <- c(\"overall\", \"adv_knowledge\", \"methods\", \"logic_comms\", \"real_world\", \"gp_relevance\", \"open_sci\")\n\n#... 'predictions' are currently 1-5 (0-5?)\npred_cats <- c(\"journal_predict\", \"merits_journal\")\n\n#DR: Note that I use these objects in future chapters, but they are not connected to the data frame. Either one saves and reinputs the whole environment (messy, replicability issue), or you save this somewhere and re-input it or connect it to the data frame that gets saved (not sure how to do it), or you hard-code reinput it in the next chapter. \n\n#I do the latter for now, but I'm not happy about it, because the idea is 'input definitions in a single place to use later'\n\n#JB: I don't really understand why not just hard code it in the next chapter. These are very short strings. Do you expect them to change often? 
If so, we can derive them from a dataframe somewhere in the future or save as a separate object. \n\n\n# JB: Rewritten functions for adding imputed ub and lb\n\n# calculate the lower and upper bounds, \n# rating: given a rating, \n# conf: a confidence (1-5) score,\n# type: a bound type (lower, upper),\n# scale: a scale (100 is 0-100, 5 is 1-5)\n# This function is not vectorized\ncalc_bounds <- function(rating, conf, type, scale) {\n \n if(scale == 5){ #for the 'journal tier prediction case'\n baseline_width = case_match(conf, \n 5 ~ .2, #4*5/100\n 4 ~ .4, #8*5/100\n 3 ~ .75, #15*5/100\n 2 ~ 1.25, #25*5/100\n 1 ~ 1.875, #37.5*5/100\n .default = NA_real_) \n\n upper = min(rating + baseline_width, 5)\n lower = max(rating - baseline_width, 1)\n }#/if(scale == 5)\n \n if(scale == 100){ #for the 'ratings case'\n \n baseline_width = case_match(conf, \n 5 ~ 4, \n 4 ~ 8, \n 3 ~ 15, \n 2 ~ 25, \n 1 ~ 37.5,\n .default = NA_real_)\n \n upper = min(rating + baseline_width, 100)\n lower = max(rating - baseline_width, 0)\n } #/if(scale == 100)\n \n if(type == \"lower\") return(lower)\n if(type == \"upper\") return(upper)\n}\n\n\n# calculate or find correct lower or upper bound\n# based on rating type, and lb, ub, and conf values\nimpute_bounds <- function(var_name, est, lb, ub, conf, bound_type) {\n \n # get scale of each variable\n scale = if_else(var_name %in% c(\"journal_predict\", \"merits_journal\"), # if variable is a prediction\n 5, 100) #scale is 5, else scale is 100\n # if calculating lower bound variable\n if(bound_type == \"lower\") { #we are calculating a lower bound imputation\n # \n calculated_bound = map_dbl(.x = est, .f = calc_bounds, conf = conf, type = bound_type, scale = scale)\n \n imp_bound = if_else(is.na(lb), calculated_bound, lb)\n }\n \n # if calculating upper bound variable\n if(bound_type == \"upper\") { #we are calculating an upper bound imputation\n # \n calculated_bound = map_dbl(.x = est, .f = calc_bounds, conf = conf, type = bound_type, scale = scale)\n imp_bound = if_else(is.na(ub), calculated_bound, ub)\n }\n \n return(imp_bound)\n}\n\n# apply functions to evals_pub_long\n# where each row is one type of rating\n# so each evaluation is 9 rows long\nevals_pub_long <- evals_pub_long %>% \n rowwise() %>% # apply function to each row\n mutate(lb_imp_ = impute_bounds(var_name = rating_type,\n est = est_,\n lb = lb_, ub = ub_, conf = conf_,\n bound_type = \"lower\")) %>% \n mutate(ub_imp_ = impute_bounds(var_name = rating_type,\n est = est_,\n lb = lb_, ub = ub_, conf = conf_,\n bound_type = \"upper\"))\n\n# Reshape evals_pub_long into evals_pub to add imputed bounds\nevals_pub <- evals_pub_long %>% \n pivot_wider(names_from = rating_type, # take the dataframe back to old format\n values_from = c(est_, ub_, lb_, conf_, lb_imp_, ub_imp_),\n names_sep = \"\") %>% \n dplyr::rename_with(.cols = matches(\"^[ul]b_imp\"),\n .fn = gsub,\n pattern = \"(ub_imp|lb_imp)_(.+)\", \n replacement = \"\\\\2_\\\\1\") %>% \n dplyr::rename_with(.cols = starts_with(\"est_\"),\n .fn = gsub,\n pattern = \"est_(.+)\",\n replacement = \"\\\\1\")\n\n# Clean evals_pub_long names (remove _ at end)\nevals_pub_long <- evals_pub_long %>% \n rename_with(.cols = ends_with(\"_\"),\n .fn = str_remove,\n pattern = \"_$\")\n```\n:::\n\n\n\nWe cannot publicly share the 'papers under consideration', but we can share some of the statistics on these papers. 
Let's generate an ID (or later, salted hash) for each such paper, and keep only the shareable features of interest\n\n\n::: {.cell}\n\n```{.r .cell-code}\nall_papers_p <- all_pub_records %>% \n dplyr::select(\n id,\n category,\n cfdc_DR,\n 'confidence -- user entered',\n cfdc_assessor,\n avg_cfdc,\n category,\n cause_cat_1_text,\n cause_cat_2_text,\n topic_subfield_text,\n eval_manager_text,\n 'publication status',\n 'Contacted author?',\n 'stage of process/todo',\n 'source_main', \n 'author permission?',\n'Direct Kotahi Prize Submission?',\n 'createdTime' \n )\n```\n:::\n\n::: {.cell}\n\n```{.r .cell-code}\n# Create a dataset to be used as the input to a shiny app\n\n#| include: false\n#| \nevals_pub_long %>% \n write_rds(file = here(\"shinyapp/DataExplorer\", \"shiny_explorer.rds\"))\n```\n:::\n\n::: {.cell}\n\n```{.r .cell-code code-summary=\"save data for others' use\"}\nall_papers_p %>% saveRDS(file = here(\"data\", \"all_papers_p.Rdata\"))\nall_papers_p %>% write_csv(file = here(\"data\", \"all_papers_p.csv\"))\n\nevals_pub %>% saveRDS(file = here(\"data\", \"evals.Rdata\"))\nevals_pub %>% write_csv(file = here(\"data\", \"evals.csv\"))\n\nevals_pub_long %>% write_rds(file = here(\"data\", \"evals_long.rds\"))\nevals_pub_long %>% write_csv(file = here(\"data\", \"evals_long.csv\"))\n\n#evals_pub %>% readRDS(file = here(\"data\", \"evals.Rdata\"))\n```\n:::\n", + "supporting": [], "filters": [ "rmarkdown/pagebreak.lua" ], diff --git a/chapters/evaluation_data_analysis.qmd b/chapters/evaluation_data_analysis.qmd index fed405c..8f5de56 100644 --- a/chapters/evaluation_data_analysis.qmd +++ b/chapters/evaluation_data_analysis.qmd @@ -145,9 +145,6 @@ rename_dtstuff <- function(df){ # Need to find a way to control column width but it seems to be a problem with DT # https://github.com/rstudio/DT/issues/29 -# we didn't seem to be using all_evals_dt so I removed it to increase readability - - evals_pub_df <- evals_pub %>% # Arrange data diff --git a/chapters/evaluation_data_input.qmd b/chapters/evaluation_data_input.qmd index 9778b0d..2de334a 100644 --- a/chapters/evaluation_data_input.qmd +++ b/chapters/evaluation_data_input.qmd @@ -411,17 +411,8 @@ all_papers_p <- all_pub_records %>% #| include: false #| -paper_ratings <- evals_pub %>% - select(paper_abbrev, eval_name, one_of(rating_cats), one_of(pred_cats), ends_with("_imp")) %>% # rating vars - dplyr::rename_with(.cols = c(one_of(rating_cats), one_of(pred_cats)), - .fn = gsub, - pattern = "(.+)", - replacement = "\\1_best") %>% - pivot_longer(cols = -c(paper_abbrev, eval_name), - names_pattern = "(overall|adv_knowledge|methods|logic_comms|real_world|gp_relevance|open_sci|journal_predict|merits_journal)_(.+)", - names_to = c("rating_type", ".value"))# one line per rating type - -write_rds(paper_ratings, file = here("shinyapp/DataExplorer", "shiny_explorer.rds")) +evals_pub_long %>% + write_rds(file = here("shinyapp/DataExplorer", "shiny_explorer.rds")) ``` diff --git a/docs/chapters/evaluation_data_input.html b/docs/chapters/evaluation_data_input.html index 7d05036..739bd82 100644 --- a/docs/chapters/evaluation_data_input.html +++ b/docs/chapters/evaluation_data_input.html @@ -583,17 +583,8 @@ #| include: false #| -paper_ratings <- evals_pub %>% - select(paper_abbrev, eval_name, one_of(rating_cats), one_of(pred_cats), ends_with("_imp")) %>% # rating vars - dplyr::rename_with(.cols = c(one_of(rating_cats), one_of(pred_cats)), - .fn = gsub, - pattern = "(.+)", - replacement = "\\1_best") %>% - pivot_longer(cols = -c(paper_abbrev, eval_name), 
- names_pattern = "(overall|adv_knowledge|methods|logic_comms|real_world|gp_relevance|open_sci|journal_predict|merits_journal)_(.+)", - names_to = c("rating_type", ".value"))# one line per rating type - -write_rds(paper_ratings, file = here("shinyapp/DataExplorer", "shiny_explorer.rds")) +evals_pub_long %>% + write_rds(file = here("shinyapp/DataExplorer", "shiny_explorer.rds"))
@@ -1322,38 +1313,29 @@ #| include: false #| -paper_ratings <- evals_pub %>% - select(paper_abbrev, eval_name, one_of(rating_cats), one_of(pred_cats), ends_with("_imp")) %>% # rating vars - dplyr::rename_with(.cols = c(one_of(rating_cats), one_of(pred_cats)), - .fn = gsub, - pattern = "(.+)", - replacement = "\\1_best") %>% - pivot_longer(cols = -c(paper_abbrev, eval_name), - names_pattern = "(overall|adv_knowledge|methods|logic_comms|real_world|gp_relevance|open_sci|journal_predict|merits_journal)_(.+)", - names_to = c("rating_type", ".value"))# one line per rating type +evals_pub_long %>% + write_rds(file = here("shinyapp/DataExplorer", "shiny_explorer.rds")) + +``` + + +```{r save data} +#| label: savedata +#| code-summary: "save data for others' use" -write_rds(paper_ratings, file = here("shinyapp/DataExplorer", "shiny_explorer.rds")) - -``` + +all_papers_p %>% saveRDS(file = here("data", "all_papers_p.Rdata")) +all_papers_p %>% write_csv(file = here("data", "all_papers_p.csv")) - -```{r save data} -#| label: savedata -#| code-summary: "save data for others' use" - +evals_pub %>% saveRDS(file = here("data", "evals.Rdata")) +evals_pub %>% write_csv(file = here("data", "evals.csv")) + +evals_pub_long %>% write_rds(file = here("data", "evals_long.rds")) +evals_pub_long %>% write_csv(file = here("data", "evals_long.csv")) -all_papers_p %>% saveRDS(file = here("data", "all_papers_p.Rdata")) -all_papers_p %>% write_csv(file = here("data", "all_papers_p.csv")) - -evals_pub %>% saveRDS(file = here("data", "evals.Rdata")) -evals_pub %>% write_csv(file = here("data", "evals.csv")) - -evals_pub_long %>% write_rds(file = here("data", "evals_long.rds")) -evals_pub_long %>% write_csv(file = here("data", "evals_long.csv")) - -#evals_pub %>% readRDS(file = here("data", "evals.Rdata")) - -``` +#evals_pub %>% readRDS(file = here("data", "evals.Rdata")) + +```
diff --git a/shinyapp/DataExplorer/app.R b/shinyapp/DataExplorer/app.R index 1b62e44..980d9c0 100644 --- a/shinyapp/DataExplorer/app.R +++ b/shinyapp/DataExplorer/app.R @@ -19,7 +19,7 @@ my_pal = colorRampPalette(brewer.pal(8, "Set1"))(color_count) df <- df %>% group_by(paper_abbrev, rating_type) %>% mutate(n_evals = n(), # number of evaluators for each paper - rating_mean = mean(best, na.rm = T)) %>% # replace with aggreCAT functions later + rating_mean = mean(est, na.rm = T)) %>% # replace with aggreCAT functions later ungroup() %>% nest(.by = paper_abbrev) %>% mutate(paper_color = my_pal) %>% # give each paper its own color @@ -35,7 +35,7 @@ ui <- fluidPage( # Application title titlePanel("Unjournal Evaluation Data"), - # Sidebar with a slider input for number of bins + # Sidebar with plot options sidebarLayout( sidebarPanel( selectInput(inputId = "RatingType", @@ -50,13 +50,14 @@ ui <- fluidPage( choices = unique(df$paper_abbrev), selected = unique(df$paper_abbrev), inline = FALSE, - width = NULL, + width = "200px", choiceNames = NULL, choiceValues = NULL - ) - ), + ), + width = 3 + ), fluid = F, position = "right", - # Show a plot of the generated distribution + # Show the plot with selected info mainPanel( plotOutput(outputId = "distPlot", width = "100%") @@ -64,7 +65,7 @@ ui <- fluidPage( ) ) -# Define server logic required to draw a histogram +# Define server logic required to draw plot server <- function(input, output) { output$distPlot <- renderPlot({ @@ -79,7 +80,7 @@ server <- function(input, output) { filter(paper_abbrev %in% input$IncludedPapers) %>% filter(!is.na(rating_mean)) %>% mutate(paper_abbrev = fct_reorder(paper_abbrev, rating_mean)) %>% - ggplot(aes(x = paper_abbrev, y = best, text = eval_name)) + # dont remove eval name or posdodge stops working (???) + ggplot(aes(x = paper_abbrev, y = est, text = eval_name)) + # dont remove eval name or posdodge stops working (???) 
geom_point(aes(color = paper_color), stat = "identity", size = 2, shape = 18, stroke = 1, position = pd) + @@ -96,7 +97,7 @@ scale_x_discrete(labels = function(x) str_wrap(x, width = 20)) + scale_color_identity() # to use paper_color as colors - }, height = 600, width = 600) + }, height = 600, width = 700) } # Run the application diff --git a/shinyapp/DataExplorer/shiny_explorer.rds b/shinyapp/DataExplorer/shiny_explorer.rds index c71b86a52e210f010316b2daad38a3b1a3fe078c..f76e72460388f9090ac4254ae8970ed5d24dd6a5 100644
GIT binary patch literal 77498 [base85-encoded binary data omitted] delta 1454 [base85-encoded binary data omitted]
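Note on the shareable-papers step in the chapter above: the text says "Let's generate an ID (or later, salted hash) for each such paper", but the `all_papers_p` selection in this patch does not yet add such an identifier. Below is a minimal sketch of one way it could be done, assuming the `digest` package and a secret salt stored in a `PAPER_ID_SALT` environment variable; both the package choice and the variable name are illustrative assumptions, not part of this patch.

```r
# Sketch only: derive a shareable, salted hash per paper.
# Assumes the 'digest' package and a secret salt in PAPER_ID_SALT (kept out of the repo).
library(dplyr)
library(purrr)
library(digest)

salt <- Sys.getenv("PAPER_ID_SALT")

all_papers_p <- all_papers_p %>%
  mutate(paper_hash = map_chr(id, ~ digest(paste0(salt, .x), algo = "sha256"))) %>%
  select(-id)  # drop the raw Airtable record ID before sharing
```

With a fixed salt, the hash stays stable across runs (so papers can be tracked over time) without exposing the underlying Airtable record IDs.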