From 871bbff914ff7e974bdf863aadcdde2f67e892a6 Mon Sep 17 00:00:00 2001
From: aleksandra-kim
Date: Thu, 16 Jun 2022 13:34:29 +0200
Subject: [PATCH] version update to 0.1.DEV3; new random seeds and sample
 sizes for XGBoost screening runs

---
 .gitignore                                 |   1 +
 akula/combustion.py                        |   4 +-
 akula/electricity/create_datapackages.py   |  12 +-
 akula/electricity/entso_data_converter.py  |   2 +-
 akula/markets.py                           |  25 +-
 akula/parameterized_exchanges.py           |   5 +-
 akula/version.py                           |   2 +-
 dev/GSA project master definition.ipynb    | 142 ++++--
 dev/archived/paper3_market_plots.ipynb     |  50 +-
 dev/draft.py                               | 560 ++++++++++++----------
 dev/paper3_plot_electricity.py             |  28 +-
 dev/paper3_plot_electricity_dirichlet.py   |  16 +-
 dev/run_local_sa.py                        | 260 +++++-----
 dev/setac32_plot_combustion.py             |  12 +-
 dev/setac32_plot_markets.py                |   4 +-
 dev/setac32_plot_monte_carlo.py            | 139 +++++-
 setup.cfg                                  |   2 +-
 17 files changed, 764 insertions(+), 500 deletions(-)

diff --git a/.gitignore b/.gitignore
index 7169fe3..2e4a4df 100644
--- a/.gitignore
+++ b/.gitignore
@@ -59,4 +59,5 @@ MANIFEST
 dev/write_files/
 dev/archived/implicit_markets/
 dev/archived/*.pickle
+dev/archived/.ipynb_checkpoints/
 data/
diff --git a/akula/combustion.py b/akula/combustion.py
index ecfdd82..f0d08de 100644
--- a/akula/combustion.py
+++ b/akula/combustion.py
@@ -357,8 +357,8 @@ def generate_validation_datapackage(mask_tech, mask_bio, num_samples=SAMPLES, se

 if __name__ == "__main__":

-    random_seeds = [51, 52, 53, 54, 55, 56, 57, 58, 59, 60]
-    num_samples = SAMPLES
+    random_seeds = [85, 86]
+    num_samples = 15000
     for random_seed in random_seeds:
         print(f"Random seed {random_seed}")
         generate_liquid_fuels_combustion_correlated_samples(num_samples, random_seed)
diff --git a/akula/electricity/create_datapackages.py b/akula/electricity/create_datapackages.py
index 647d230..2ce8e6e 100644
--- a/akula/electricity/create_datapackages.py
+++ b/akula/electricity/create_datapackages.py
@@ -1,5 +1,5 @@
-from .entso_data_converter import ENTSODataConverter
-from .add_residual_mix import add_swiss_residual_mix
+from entso_data_converter import ENTSODataConverter
+from add_residual_mix import add_swiss_residual_mix
 import bw2data as bd
 import bw_processing as bwp
 from fs.zipfs import ZipFS
@@ -141,20 +141,21 @@ def create_timeseries_entso_datapackages():

     ndata = data.shape[1]

-    random_seeds = np.arange(61, 71)
+    random_seeds = [85, 86]
+    num_samples = 15000

     for random_seed in random_seeds:

         print(f"Random seed {random_seed}")

         np.random.seed(random_seed)
-        choice = np.random.choice(np.arange(ndata), SAMPLES)
+        choice = np.random.choice(np.arange(ndata), num_samples)
         data_current = data[:, choice]

         # Create datapackage
         dp = bwp.create_datapackage(
-            fs=ZipFS(str(DATA_DIR / f"{name}-{random_seed}.zip"), write=True),
+            fs=ZipFS(str(DATA_DIR / "xgboost" / f"{name}-{random_seed}.zip"), write=True),
             name='timeseries ENTSO electricity values',
             seed=random_seed,
             sequential=True,
@@ -169,5 +170,4 @@
         )
         dp.finalize_serialization()

-        print(dp.data[1])
         print("")
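Note: the hunk above writes presampled ENTSO-E columns into one zipped, sequential bw_processing datapackage per random seed. A minimal sketch of the same write/read round trip, using only the bw_processing and fs.zipfs calls that appear in this patch; the file name, indices, and sample values are invented for illustration:

    import numpy as np
    import bw_processing as bwp
    from fs.zipfs import ZipFS

    # Two hypothetical technosphere exchanges, identified by (row, col) indices.
    indices = np.array([(101, 202), (103, 202)], dtype=bwp.INDICES_DTYPE)
    samples = np.random.rand(2, 15000)  # one row per exchange, one column per draw

    dp = bwp.create_datapackage(
        fs=ZipFS("example-85.zip", write=True),
        name="example entso values",
        seed=85,
        sequential=True,  # columns are consumed in order during iteration
    )
    dp.add_persistent_array(
        matrix="technosphere_matrix",
        data_array=samples,
        name="example entso values",
        indices_array=indices,
    )
    dp.finalize_serialization()

    # Reading it back mirrors the pattern used in akula/markets.py below.
    dp2 = bwp.load_datapackage(ZipFS("example-85.zip"))
    data = dp2.get_resource("example entso values.data")[0]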
diff --git a/akula/electricity/entso_data_converter.py b/akula/electricity/entso_data_converter.py
index ab70bf3..d27b623 100644
--- a/akula/electricity/entso_data_converter.py
+++ b/akula/electricity/entso_data_converter.py
@@ -6,7 +6,7 @@
 from bentso import CachingDataClient as CDC
 from bentso.constants import ENTSO_COUNTRIES, TRADE_PAIRS
 from pathlib import Path
-from .config import ENTSO_MAPPING
+from config import ENTSO_MAPPING

 assert bd.__version__ >= (4, 0, "DEV11")
 # assert bentso.__version__ >= (0, 4)
diff --git a/akula/markets.py b/akula/markets.py
index 0ac9e0d..3e4c01b 100644
--- a/akula/markets.py
+++ b/akula/markets.py
@@ -363,18 +363,19 @@ def generate_validation_datapackages(indices, mask, num_samples, seed=42):

 if __name__ == "__main__":

-    random_seeds = [48, 49, 50]
-    for random_seed in random_seeds:
-        print(f"Random seed {random_seed}")
-        generate_markets_datapackage(
-            similar_fuzzy,
-            get_dirichlet_scales,
-            "implicit-markets",
-            SAMPLES,
-            random_seed,
-        )
-
-    # im = bwp.load_datapackage(ZipFS(str(DATA_DIR / "implicit-markets-43.zip")))
+    # random_seeds = [85, 86]
+    # num_samples = 15000
+    # for random_seed in random_seeds:
+    #     print(f"Random seed {random_seed}")
+    #     generate_markets_datapackage(
+    #         similar_fuzzy,
+    #         get_dirichlet_scales,
+    #         "implicit-markets",
+    #         num_samples,
+    #         random_seed,
+    #     )
+
+    im = bwp.load_datapackage(ZipFS(str(DATA_DIR / "implicit-markets-91.zip")))
     # im_data = im.get_resource('implicit-markets.data')[0]
     # im_indices = im.get_resource('implicit-markets.indices')[0]
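Note: generate_markets_datapackage and get_dirichlet_scales above sample implicit market shares from Dirichlet distributions, which keeps every sampled mix summing to one. The idea in isolation, with made-up shares and an illustrative concentration parameter (not the value the repo's get_dirichlet_scales would return):

    import numpy as np

    shares = np.array([0.6, 0.3, 0.1])  # static market shares, invented

    # A larger scale concentrates samples around the static shares;
    # a smaller one spreads them out.
    scale = 50
    samples = np.random.dirichlet(alpha=scale * shares, size=15000)

    assert np.allclose(samples.sum(axis=1), 1.0)  # each draw is a valid market mix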
diff --git a/akula/parameterized_exchanges.py b/akula/parameterized_exchanges.py
index 18bd875..397acc2 100644
--- a/akula/parameterized_exchanges.py
+++ b/akula/parameterized_exchanges.py
@@ -708,11 +708,12 @@ def generate_validation_datapackages(indices, mask, num_samples, seed=42):
     bd.projects.set_current("GSA for archetypes")

     # Generate datapackages for high-dimensional screening
-    random_seeds = [51, 52, 53, 54, 55, 56, 57, 58, 59, 60]
+    random_seeds = [85, 86]
+    num_samples = 15000
     for random_seed in random_seeds:
         print(f"Random seed {random_seed}")
         parameters_dp, exchanges_dp = generate_parameterized_exchanges_datapackage(
-            "ecoinvent-parameterization", SAMPLES, random_seed
+            "ecoinvent-parameterization", num_samples, random_seed
         )
         parameters_dp.finalize_serialization()
         exchanges_dp.finalize_serialization()
diff --git a/akula/version.py b/akula/version.py
index 2f2c468..9a767c1 100644
--- a/akula/version.py
+++ b/akula/version.py
@@ -1 +1 @@
-version = (0, 1, "DEV2")
+version = (0, 1, "DEV3")
diff --git a/dev/GSA project master definition.ipynb b/dev/GSA project master definition.ipynb
index 9882686..aa01689 100644
--- a/dev/GSA project master definition.ipynb
+++ b/dev/GSA project master definition.ipynb
@@ -2,10 +2,19 @@
  "cells": [
   {
    "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 2,
    "id": "63711f04",
    "metadata": {},
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "env: ENTSOE_API_TOKEN=98eb7887-8d16-43f4-b75c-1e8a80be520e\n",
+      "env: BENTSO_DATA_DIR=/Users/akim/Documents/LCA_files/entso-data-cache\n"
+     ]
+    }
+   ],
    "source": [
     "import bw2data as bd\n",
     "import bw2calc as bc\n",
@@ -37,27 +46,89 @@
     "\n",
     "import sys\n",
     "sys.path.append('/Users/akim/PycharmProjects/akula')\n",
-    "from akula.electricity import replace_ei_with_entso, add_swiss_residual_mix"
+    "# from akula.electricity import replace_ei_with_entso, add_swiss_residual_mix"
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": null,
-   "id": "72c66e53",
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "bd.projects.delete_project(\"GSA for archetypes\", True)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
+   "execution_count": 3,
    "id": "b155e53b-688d-40f2-8bb2-4e58e55c0925",
    "metadata": {
     "scrolled": true
    },
-   "outputs": [],
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "\n",
+      "\n",
+      "Impacts WITH exiobase\n",
+      "---------------------\n",
+      "Biosphere database already present!!! No setup is needed\n",
+      "ecoinvent 3.3 cutoff database already present!!! No import is needed\n",
+      "--> Consumption_db.xlsx already exists, reading it\n",
+      "Created consumption_db.xlsx file in BW format in 1.21 seconds\n",
+      "Extracted 1 worksheets in 1.28 seconds\n",
+      "Applying strategy: migrate_datasets\n",
+      "Applying strategy: migrate_exchanges\n",
+      "Applying strategy: migrate_datasets\n",
+      "Applying strategy: migrate_exchanges\n",
+      "Applying strategy: migrate_datasets\n",
+      "Applying strategy: migrate_exchanges\n",
+      "Applying strategy: migrate_datasets\n",
+      "Applying strategy: migrate_exchanges\n",
+      "Applying strategy: migrate_datasets\n",
+      "Applying strategy: migrate_exchanges\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/Users/akim/PycharmProjects/consumption_model_ch/consumption_model_ch/strategies/consumption_db.py:248: DtypeWarning: Columns (2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31,32,33,34,35,36,37,38,39,40,41,42,43,44,45,46,47,48,49,50,51,52,53,54,55,56,57,58,59,60,61,62,63,64,65,66,67,68,69,70,71,72,73,74,75,76,77,78,79,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,97,98,99,100,101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,131,132,133,134,135,136,137,138,139,140,141,142,143,144,145,146,147,148,149,150,151,152,153,154,155,156,157,158,159,160,161,162,163,164,165,166,167,168,169,170,171,172,173,174,175,176,177,178,179,180,181,182,183,184,185,186,187,188,189,190,191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206,207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253,254,255,256,257,258,259,260,261,262,263,264,265,266,267,268,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,287,288,289,290,291,292,293,294,295,296,297,298,299,300,301,302,303,304,305,306,307,308,309,310,311,312,313,314,315,316,317,318,319,320,321,322,323,324,325,326,327,328,329,330,331,332,333,334,335,336,337,338,339,340,341,342,343,344) have mixed types. Specify dtype option on import or set low_memory=False.\n",
+      "  df = pd.read_table(filepath)\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Applying strategy: link_iterable_by_fields\n",
+      "Applying strategy: link_iterable_by_fields\n",
+      "Applying strategy: link_iterable_by_fields\n",
+      "Applying strategy: link_iterable_by_fields\n",
+      "Applied 0 strategies in 0.00 seconds\n",
+      "207 datasets\n",
+      "6676 exchanges\n",
+      "0 unlinked exchanges\n",
+      " \n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "Writing activities to SQLite3 database:\n",
+      "0% [##############################] 100% | ETA: 00:00:00\n",
+      "Total time elapsed: 00:00:00\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Title: Writing activities to SQLite3 database:\n",
+      "  Started: 05/28/2022 22:27:11\n",
+      "  Finished: 05/28/2022 22:27:12\n",
+      "  Total time elapsed: 00:00:00\n",
+      "  CPU %: 99.90\n",
+      "  Memory %: 2.07\n",
+      "Created database: swiss consumption 1.0\n",
+      "1756.551 ch hh average consumption aggregated, years 151617\n"
+     ]
+    }
+   ],
    "source": [
     "def import_all_databases(use_exiobase, year='091011', add_activities=True, add_archetypes=False):\n",
     "\n",
@@ -111,8 +182,8 @@
     "    )\n",
     "    \n",
     "    # Modify electricity mixes\n",
-    "    add_swiss_residual_mix()\n",
-    "    replace_ei_with_entso()\n",
+    "#     add_swiss_residual_mix()\n",
+    "#     replace_ei_with_entso()\n",
     "\n",
     "    # Add uncertainties to GWP values\n",
     "    method = (\"IPCC 2013\", \"climate change\", \"GWP 100a\", \"uncertain\")\n",
@@ -139,13 +210,13 @@
     "    lca.lcia()\n",
     "    print(\"{:8.3f} {}\".format(lca.score, demand_act['name']))\n",
     "\n",
-    "    # LCIA for all Swiss consumption sectors\n",
-    "    sectors = sorted([act for act in co if \"sector\" in act['name'].lower() and str(year) in act['name']])\n",
-    "    for demand_act in sectors:\n",
-    "        lca = bc.LCA({demand_act: 1}, method)\n",
-    "        lca.lci()\n",
-    "        lca.lcia()\n",
-    "        print(\"{:8.3f} {}\".format(lca.score, demand_act['name']))\n",
+    "#     # LCIA for all Swiss consumption sectors\n",
+    "#     sectors = sorted([act for act in co if \"sector\" in act['name'].lower() and str(year) in act['name']])\n",
+    "#     for demand_act in sectors:\n",
+    "#         lca = bc.LCA({demand_act: 1}, method)\n",
+    "#         lca.lci()\n",
+    "#         lca.lcia()\n",
+    "#         print(\"{:8.3f} {}\".format(lca.score, demand_act['name']))\n",
     "    \n",
     "\n",
     "def plot_archetypes(use_exiobase, year):\n",
@@ -184,18 +255,19 @@
     "\n",
     "if __name__ == \"__main__\":\n",
     "\n",
-    "    print(\"Impacts WITHOUT exiobase\")\n",
-    "    print(\"------------------------\")\n",
-    "    use_ex = False\n",
-    "    year = '151617'\n",
-    "    import_all_databases(use_ex, year, True, False)\n",
+    "#     print(\"Impacts WITHOUT exiobase\")\n",
+    "#     print(\"------------------------\")\n",
+    "#     use_ex = False\n",
+    "#     year = '151617'\n",
+    "#     import_all_databases(use_ex, year, True, False)\n",
     "#     plot_archetypes(use_ex, year)\n",
     "\n",
-    "#     print(\"\\n\")\n",
-    "#     print(\"Impacts WITH exiobase\")\n",
-    "#     print(\"---------------------\")\n",
-    "#     use_ex = True\n",
-    "#     import_all_databases(use_ex, year, True, False)\n",
+    "    print(\"\\n\")\n",
+    "    print(\"Impacts WITH exiobase\")\n",
+    "    print(\"---------------------\")\n",
+    "    use_ex = True\n",
+    "    year = '151617'\n",
+    "    import_all_databases(use_ex, year, True, False)\n",
     "#     plot_archetypes(use_ex, year)\n"
    ]
   },
@@ -219,7 +291,7 @@
   {
    "cell_type": "code",
    "execution_count": null,
-   "id": "f50e7877",
+   "id": "e2726174",
    "metadata": {},
    "outputs": [],
    "source": []
diff --git a/dev/archived/paper3_market_plots.ipynb b/dev/archived/paper3_market_plots.ipynb
index cac821a..94a839c 100644
--- a/dev/archived/paper3_market_plots.ipynb
+++ b/dev/archived/paper3_market_plots.ipynb
@@ -10,7 +10,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 1,
+  "execution_count": 21,
   "id": "bb9c4955",
   "metadata": {},
   "outputs": [],
@@ -30,13 +30,13 @@
    "\n",
    "bd.projects.set_current('GSA for archetypes')\n",
    "\n",
-   "fp_implicit_markets = DATA_DIR / \"implicit-markets.zip\"\n",
+   "fp_implicit_markets = DATA_DIR / \"xgboost\" / \"implicit-markets-81.zip\"\n",
    "dp = bwp.load_datapackage(ZipFS(fp_implicit_markets))"
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": 3,
+  "execution_count": 22,
   "id": "c9279851",
   "metadata": {},
   "outputs": [],
@@ -48,7 +48,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 4,
+  "execution_count": 24,
   "id": "04b9924f",
   "metadata": {},
   "outputs": [],
@@ -68,18 +68,19 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 10,
+  "execution_count": 25,
   "id": "5fde5cff",
   "metadata": {},
   "outputs": [
    {
-    "name": "stderr",
-    "output_type": "stream",
-    "text": [
-     "/var/folders/mf/xg26zj4n7sg00qbjlfk7p9qr0000gn/T/ipykernel_69010/4232875168.py:44: RuntimeWarning:\n",
-     "\n",
-     "divide by zero encountered in double_scalars\n",
-     "\n"
+    "ename": "KeyError",
+    "evalue": "0",
+    "output_type": "error",
+    "traceback": [
+     "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
+     "\u001b[0;31mKeyError\u001b[0m                                  Traceback (most recent call last)",
+     "Input \u001b[0;32mIn [25]\u001b[0m, in \u001b[0;36m<cell line: 26>\u001b[0;34m()\u001b[0m\n\u001b[1;32m     23\u001b[0m alpha \u001b[38;5;241m=\u001b[39m x\u001b[38;5;241m.\u001b[39mcopy()\n\u001b[1;32m     24\u001b[0m alpha_exc_dict \u001b[38;5;241m=\u001b[39m {alpha[i]: current[i] \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(alpha))}\n\u001b[0;32m---> 26\u001b[0m scaling_factors_str \u001b[38;5;241m=\u001b[39m \u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mSF=\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mscaling_factors[count]\u001b[38;5;132;01m:\u001b[39;00m\u001b[38;5;124m5.3f\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m     27\u001b[0m fig \u001b[38;5;241m=\u001b[39m make_subplots(\n\u001b[1;32m     28\u001b[0m     rows\u001b[38;5;241m=\u001b[39mrows, \n\u001b[1;32m     29\u001b[0m     cols\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m,\n\u001b[1;32m     30\u001b[0m     subplot_titles\u001b[38;5;241m=\u001b[39m[scaling_factors_str]\n\u001b[1;32m     31\u001b[0m     \u001b[38;5;66;03m# subplot_titles = scores_str,\u001b[39;00m\n\u001b[1;32m     32\u001b[0m )\n\u001b[1;32m     34\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i,exc \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28menumerate\u001b[39m(current):\n",
+     "\u001b[0;31mKeyError\u001b[0m: 0"
     ]
    }
   ],
   "source": [
    "num_bins = 100\n",
    "count = 0\n",
    "fit_var = True\n",
-   "based_on_contr = True\n",
+   "based_on_contr = False\n",
    "use_th = False\n",
    "scaling_factors = get_dirichlet_scales(found, fit_var, based_on_contr, use_th)\n",
    " \n",
@@ -189,7 +190,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 11,
+  "execution_count": 26,
   "id": "a0684289",
   "metadata": {},
   "outputs": [],
@@ -212,7 +213,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 12,
+  "execution_count": 38,
   "id": "45edecb6",
   "metadata": {},
   "outputs": [],
@@ -366,7 +367,7 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 13,
+  "execution_count": null,
   "id": "64ef59ad",
   "metadata": {},
   "outputs": [],
@@ -390,23 +391,10 @@
  },
  {
   "cell_type": "code",
-  "execution_count": 14,
+  "execution_count": null,
   "id": "01bf4076",
   "metadata": {},
-  "outputs": [
-   {
-    "ename": "FileNotFoundError",
-    "evalue": "[Errno 2] No such file or directory: '/Users/akim/PycharmProjects/akula/akula/data/generic-markets.pickle'",
-    "output_type": "error",
-    "traceback": [
-     "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
-     "\u001b[0;31mFileNotFoundError\u001b[0m                         Traceback (most recent call last)",
-     "Input \u001b[0;32mIn [14]\u001b[0m, in \u001b[0;36m<cell line: 7>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      1\u001b[0m \u001b[38;5;66;03m# ei_name = \"ecoinvent 3.8 cutoff\"\u001b[39;00m\n\u001b[1;32m      2\u001b[0m \u001b[38;5;66;03m# found = find_uncertain_implicit_markets(ei_name)\u001b[39;00m\n\u001b[1;32m      3\u001b[0m \u001b[38;5;66;03m# markets = find_markets(ei_name)\u001b[39;00m\n\u001b[1;32m      4\u001b[0m \u001b[38;5;66;03m# write_pickle(found, \"implicit_markets.pickle\")\u001b[39;00m\n\u001b[1;32m      5\u001b[0m \u001b[38;5;66;03m# write_pickle(markets, \"normal_markets.pickle\")\u001b[39;00m\n\u001b[1;32m      6\u001b[0m ims \u001b[38;5;241m=\u001b[39m read_pickle(DATA_DIR \u001b[38;5;241m/\u001b[39m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mimplicit-markets.pickle\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[0;32m----> 7\u001b[0m gms \u001b[38;5;241m=\u001b[39m \u001b[43mread_pickle\u001b[49m\u001b[43m(\u001b[49m\u001b[43mDATA_DIR\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m/\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mgeneric-markets.pickle\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
-     "File \u001b[0;32m~/PycharmProjects/gsa-framework-master/gsa_framework/utils.py:38\u001b[0m, in \u001b[0;36mread_pickle\u001b[0;34m(filepath)\u001b[0m\n\u001b[1;32m     36\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mread_pickle\u001b[39m(filepath):\n\u001b[1;32m     37\u001b[0m     \u001b[38;5;124;03m\"\"\"Read ``data`` from a file with .pickle extension\"\"\"\u001b[39;00m\n\u001b[0;32m---> 38\u001b[0m     \u001b[38;5;28;01mwith\u001b[39;00m \u001b[38;5;28;43mopen\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mfilepath\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mrb\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m \u001b[38;5;28;01mas\u001b[39;00m f:\n\u001b[1;32m     39\u001b[0m     data \u001b[38;5;241m=\u001b[39m pickle\u001b[38;5;241m.\u001b[39mload(f)\n\u001b[1;32m     40\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m data\n",
-     "\u001b[0;31mFileNotFoundError\u001b[0m: [Errno 2] No such file or directory: '/Users/akim/PycharmProjects/akula/akula/data/generic-markets.pickle'"
-    ]
-   }
-  ],
+  "outputs": [],
   "source": [
    "# ei_name = \"ecoinvent 3.8 cutoff\"\n",
    "# found = find_uncertain_implicit_markets(ei_name)\n",
diff --git a/dev/draft.py b/dev/draft.py
index b9e4f11..db8911b 100644
--- a/dev/draft.py
+++ b/dev/draft.py
@@ -1,248 +1,328 @@
-import numpy as np
+# import numpy as np
+# from pathlib import Path
+# from fs.zipfs import ZipFS
+# import bw2data as bd
+# import bw2calc as bc
+# import bw_processing as bwp
+# from copy import deepcopy
+# from gsa_framework.utils import read_pickle, write_pickle
+# from gsa_framework.visualization.plotting import plot_correlation_Y1_Y2
+# import plotly.graph_objects as go
+#
+# # Local files
+# from akula.sensitivity_analysis.local_sensitivity_analysis import (
+#     run_local_sa_technosphere, get_mask, get_tindices_wo_noninf
+# )
+# from akula.sensitivity_analysis.remove_non_influential import (
+#     get_variance_threshold, add_variances, get_indices_high_variance
+# )
+# from akula.markets import DATA_DIR
+# from akula.background import get_lca_score_shift
+#
+#
+# if __name__ == "__main__":
+#
+#     project = 'GSA for archetypes'
+#     bd.projects.set_current(project)
+#     const_factor = 10
+#     ctff = 1e-6  # Cutoff for contribution analysis
+#     mclc = 1e10  # Maximum number of computations for supply chain traversal
+#
+#     # Setups
+#     ########
+#
+#     co_db = bd.Database('swiss consumption 1.0')
+#     ei_db = bd.Database("ecoinvent 3.8 cutoff")
+#     fu = [act for act in co_db if "Food" in act['name']][0]
+#
+#     write_dir = Path("write_files") / project.lower().replace(" ", "_") \
+#         / fu['name'].lower().replace(" ", "_").replace(",", "")
+#     write_dir_sct = write_dir / "supply_chain_traversal"
+#     write_dir_sct.mkdir(exist_ok=True, parents=True)
+#
+#     demand = {fu: 1}
+#     method = ("IPCC 2013", "climate change", "GWP 100a", "uncertain")
+#     fu_mapped, pkgs, _ = bd.prepare_lca_inputs(demand=demand, method=method, remapping=False)
+#
+#     lca = bc.LCA(demand=fu_mapped, data_objs=pkgs, use_distributions=False)
+#     lca.lci()
+#     lca.lcia()
+#     static_score = deepcopy(lca.score)
+#     print(static_score)
+#
+#     # Get all relevant data
+#     ei = bd.Database('ecoinvent 3.8 cutoff').datapackage()
+#     co = bd.Database('swiss consumption 1.0').datapackage()
+#     cf = bd.Method(method).datapackage()
+#
+#     # Technosphere
+#     tei = ei.filter_by_attribute('matrix', 'technosphere_matrix')
+#     tindices_ei = tei.get_resource('ecoinvent_3.8_cutoff_technosphere_matrix.indices')[0]
+#     tdata_ei = tei.get_resource('ecoinvent_3.8_cutoff_technosphere_matrix.data')[0]
+#     tflip_ei = tei.get_resource('ecoinvent_3.8_cutoff_technosphere_matrix.flip')[0]
+#     tdistributions_ei = tei.get_resource('ecoinvent_3.8_cutoff_technosphere_matrix.distributions')[0]
+#
+#     # Step 1.1 Technosphere & Supply chain traversal
+#     fp_sct = write_dir_sct / f"sct.cutoff_{ctff:.0e}.maxcalc_{mclc:.0e}.pickle"
+#     if fp_sct.exists():
+#         tindices_wo_noninf = read_pickle(fp_sct)
+#     else:
+#         tindices_wo_noninf = get_tindices_wo_noninf(lca, ctff, mclc)
+#         write_pickle(tindices_wo_noninf, fp_sct)
+#
+#     fp_tmask_wo_noninf = write_dir / f"mask.tech.without_noninf.sct.cutoff_{ctff:.0e}.maxcalc_{mclc:.0e}.pickle"
+#     if fp_tmask_wo_noninf.exists():
+#         tmask_wo_noninf = read_pickle(fp_tmask_wo_noninf)
+#     else:
+#         tmask_wo_noninf = get_mask(tindices_ei, tindices_wo_noninf)
+#         write_pickle(tmask_wo_noninf, fp_tmask_wo_noninf)
+#
+#     # STEP 2: Run local SA
+#     ######################
+#
+#     # --> 2.1.1 Technosphere
+#
+#     const_factors = [1/const_factor, const_factor]
+#     # 2.1.1 Ecoinvent
+#     fp_tlocal_sa = write_dir / f"local_sa.tech.cutoff_{ctff:.0e}.maxcalc_{mclc:.0e}.pickle"
+#     if fp_tlocal_sa.exists():
+#         tlocal_sa = read_pickle(fp_tlocal_sa)
+#     else:
+#         tlocal_sa = run_local_sa_technosphere(
+#             fu_mapped,
+#             pkgs,
+#             tdistributions_ei,
+#             tmask_wo_noninf,
+#             const_factors,
+#             write_dir,
+#             f"tech.cutoff_{ctff:.0e}.maxcalc_{mclc:.0e}",
+#         )
+#         write_pickle(tlocal_sa, fp_tlocal_sa)
+#
+#     # 2.4.2 Determine variance threshold
+#
+#     add_variances([tlocal_sa], static_score)
+#     num_parameters = 10000
+#     var_threshold = get_variance_threshold([tlocal_sa], num_parameters)
+#
+#     datapackages = {
+#         "technosphere": {
+#             "local_sa": tlocal_sa,
+#             "indices": tindices_ei,
+#         },
+#     }
+#
+#     # 2.4.3 Construct masks for all inputs after local SA
+#     count = 0
+#     print(f"Selected {num_parameters} exchanges after local SA:")
+#     for name, data in datapackages.items():
+#         dtype = bwp.INDICES_DTYPE
+#         is_params = False
+#         indices_wo_lowinf = get_indices_high_variance(data['local_sa'], var_threshold)
+#         mask_wo_lowinf = get_mask(data["indices"], indices_wo_lowinf, is_params)
+#         data['indices_wo_lowinf'] = np.array(indices_wo_lowinf, dtype=dtype)
+#         data['mask_wo_lowinf'] = mask_wo_lowinf
+#         print(f"    {mask_wo_lowinf.sum():5d} from {name}")
+#         count += mask_wo_lowinf.sum()
+#
+#     # # 2.5 --> Validation of results after local SA
+#     #
+#     # viterations = 30
+#     vseed = 22222000
+#     tname = "technosphere"
+#     #
+#     # from akula.background import generate_validation_datapackages
+#     # tmask = datapackages[tname]["mask_wo_lowinf"]
+#     # tdp_vall, tdp_vinf = generate_validation_datapackages(
+#     #     demand, tname, tindices_ei, tmask, num_samples=viterations, seed=vseed
+#     # )
+#     # datapackages[tname]['local_sa.validation_all'] = tdp_vall
+#     # datapackages[tname]['local_sa.validation_inf'] = tdp_vinf
+#     #
+#     # # 2.6.1 All inputs vary
+#     # print("computing all scores")
+#     # lca_all = bc.LCA(
+#     #     fu_mapped,
+#     #     data_objs=pkgs + [tdp_vall],
+#     #     use_distributions=False,
+#     #     use_arrays=True,
+#     #     seed_override=vseed,
+#     # )
+#     # lca_all.lci()
+#     # lca_all.lcia()
+#     # scores_all = [lca_all.score for _, _ in zip(lca_all, range(viterations))]
+#     #
+#     # # 2.6.1 Only influential inputs vary
+#     # print("computing inf scores")
+#     # lca_inf = bc.LCA(
+#     #     fu_mapped,
+#     #     data_objs=pkgs + [tdp_vinf],
+#     #     use_distributions=False,
+#     #     use_arrays=True,
+#     #     seed_override=vseed,
+#     # )
+#     # lca_inf.lci()
+#     # lca_inf.lcia()
+#     # scores_inf = [lca_inf.score for _, _ in zip(lca_inf, range(viterations))]
+#     #
+#     # masks_dict_all = {
+#     #     tname: np.ones(len(datapackages[tname]["mask_wo_lowinf"]), dtype=bool),
+#     # }
+#     # masks_dict_inf = {
+#     #     tname: datapackages[tname]["mask_wo_lowinf"],
+#     # }
+#     #
+#     # offset_all = get_lca_score_shift(demand, masks_dict_all, shift_median=False)
+#     # offset_inf = get_lca_score_shift(demand, masks_dict_inf, shift_median=False)
+#     # print(offset_all, offset_inf)
+#     #
+#     # Y1 = np.array(scores_all) - offset_all
+#     # Y2 = np.array(scores_inf) - offset_inf
+#     #
+#     # fig = plot_correlation_Y1_Y2(
+#     #     Y1,
+#     #     Y2,
+#     #     start=0,
+#     #     end=50,
+#     #     trace_name1="All vary",
+#     #     trace_name2="Only inf vary"
+#     # )
+#     # fig.add_trace(
+#     #     go.Scatter(
+#     #         x=[0],
+#     #         y=[static_score],
+#     #         mode='markers',
+#     #         marker=dict(color='black', symbol='x')
+#     #     )
+#     # )
+#     # fig.show()
+#
+#     # STEP 3: Run high-dimensional screening
+#     ########################################
+#
+#     # 3.1.1. Create background datapackages for Xgboost
+#     from akula.background import create_background_datapackage
+#     xiterations = 5000
+#     # random_seeds = [71, 72, 73, 74]
+#     # for random_seed in random_seeds:
+#     #     print(random_seed)
+#     #     for bg_name in [tname]:
+#     #         print(bg_name)
+#     #         indices = datapackages[bg_name]["indices_wo_lowinf"]
+#     #         dp = create_background_datapackage(
+#     #             demand, bg_name, f"tech-{bg_name}-{random_seed}", indices, num_samples=xiterations, seed=random_seed,
+#     #         )
+#     #         dp.finalize_serialization()
+#
+#     # fp_name = "tech-technosphere-61 copy.zip"
+#     # dp_copy = bwp.load_datapackage(ZipFS(str(DATA_DIR / "xgboost" / fp_name)))
+#
+#     print("-----------------")
+#
+#     # 3.1.2. MC simulations for XGBoost
+#     dps_xgboost_names = list(datapackages)
+#     random_seeds = [71, 72, 73, 74]
+#     for random_seed in random_seeds:
+#         print(f"MC random seed {random_seed}")
+#         fp_xgboost = write_dir / f"mc.tech.xgboost.{xiterations}.{random_seed}.pickle"
+#         if fp_xgboost.exists():
+#             scores_xgboost = read_pickle(fp_xgboost)
+#         else:
+#             dps_xgboost = []
+#             for dp_name in dps_xgboost_names:
+#                 dp_temp = bwp.load_datapackage(ZipFS(str(DATA_DIR / "xgboost" / f"tech-{dp_name}-{random_seed}.zip")))
+#                 dps_xgboost.append(dp_temp)
+#
+#             lca_xgboost = bc.LCA(
+#                 fu_mapped,
+#                 data_objs=pkgs + dps_xgboost,
+#                 use_distributions=False,
+#                 use_arrays=True,
+#             )
+#             lca_xgboost.lci()
+#             lca_xgboost.lcia()
+#             scores_xgboost = [lca_xgboost.score] + [lca_xgboost.score for _, _ in zip(lca_xgboost, range(xiterations-1))]
+#             write_pickle(scores_xgboost, fp_xgboost)
+#
+#     print("")
+#
+#
+# # ref_scores = []
+# # for d in dp_temp.data[1].T:
+# #     ref_dp = bwp.create_datapackage(
+# #         name='test',
+# #         sequential=True,
+# #     )
+# #     ref_dp.add_persistent_vector(
+# #         matrix="technosphere_matrix",
+# #         data_array=d,
+# #         # Resource group name that will show up in provenance
+# #         name='test',
+# #         indices_array=indices,
+# #         flip_array=dp_temp.data[2],
+# #     )
+# #     ref_lca = bc.LCA(
+# #         fu_mapped,
+# #         data_objs=pkgs + [ref_dp],
+# #         use_distributions=False,
+# #         use_arrays=True,
+# #     )
+# #     ref_lca.lci()
+# #     ref_lca.lcia()
+# #     ref_scores.append(ref_lca.score)
+# #
+# # print(scores_xgboost)
+# # print(f"  Correct scores: {ref_scores}")
+
+import pandas as pd
 from pathlib import Path
-from fs.zipfs import ZipFS
-import bw2data as bd
-import bw2calc as bc
+import numpy as np
 import bw_processing as bwp
-from copy import deepcopy
-from gsa_framework.utils import read_pickle, write_pickle
-from gsa_framework.visualization.plotting import plot_correlation_Y1_Y2
-import plotly.graph_objects as go
-
-# Local files
-from akula.sensitivity_analysis.local_sensitivity_analysis import (
-    run_local_sa_technosphere, get_mask, get_tindices_wo_noninf
-)
-from akula.sensitivity_analysis.remove_non_influential import (
-    get_variance_threshold, add_variances, get_indices_high_variance
-)
-from akula.markets import DATA_DIR
-from akula.background import get_lca_score_shift
-
-
-if __name__ == "__main__":
-
-    project = 'GSA for archetypes'
-    bd.projects.set_current(project)
-    const_factor = 10
-    ctff = 1e-6  # Cutoff for contribution analysis
-    mclc = 1e10  # Maximum number of computations for supply chain traversal
-
-    # Setups
-    ########
-
-    co_db = bd.Database('swiss consumption 1.0')
-    ei_db = bd.Database("ecoinvent 3.8 cutoff")
-    fu = [act for act in co_db if "Food" in act['name']][0]
-
-    write_dir = Path("write_files") / project.lower().replace(" ", "_") \
-        / fu['name'].lower().replace(" ", "_").replace(",", "")
-    write_dir_sct = write_dir / "supply_chain_traversal"
-    write_dir_sct.mkdir(exist_ok=True, parents=True)
-
-    demand = {fu: 1}
-    method = ("IPCC 2013", "climate change", "GWP 100a", "uncertain")
-    fu_mapped, pkgs, _ = bd.prepare_lca_inputs(demand=demand, method=method, remapping=False)
-
-    lca = bc.LCA(demand=fu_mapped, data_objs=pkgs, use_distributions=False)
-    lca.lci()
-    lca.lcia()
-    static_score = deepcopy(lca.score)
-    print(static_score)
-
-    # Get all relevant data
-    ei = bd.Database('ecoinvent 3.8 cutoff').datapackage()
-    co = bd.Database('swiss consumption 1.0').datapackage()
-    cf = bd.Method(method).datapackage()
-
-    # Technosphere
-    tei = ei.filter_by_attribute('matrix', 'technosphere_matrix')
-    tindices_ei = tei.get_resource('ecoinvent_3.8_cutoff_technosphere_matrix.indices')[0]
-    tdata_ei = tei.get_resource('ecoinvent_3.8_cutoff_technosphere_matrix.data')[0]
-    tflip_ei = tei.get_resource('ecoinvent_3.8_cutoff_technosphere_matrix.flip')[0]
-    tdistributions_ei = tei.get_resource('ecoinvent_3.8_cutoff_technosphere_matrix.distributions')[0]
-
-    # Step 1.1 Technosphere & Supply chain traversal
-    fp_sct = write_dir_sct / f"sct.cutoff_{ctff:.0e}.maxcalc_{mclc:.0e}.pickle"
-    if fp_sct.exists():
-        tindices_wo_noninf = read_pickle(fp_sct)
-    else:
-        tindices_wo_noninf = get_tindices_wo_noninf(lca, ctff, mclc)
-        write_pickle(tindices_wo_noninf, fp_sct)
-
-    fp_tmask_wo_noninf = write_dir / f"mask.tech.without_noninf.sct.cutoff_{ctff:.0e}.maxcalc_{mclc:.0e}.pickle"
-    if fp_tmask_wo_noninf.exists():
-        tmask_wo_noninf = read_pickle(fp_tmask_wo_noninf)
-    else:
-        tmask_wo_noninf = get_mask(tindices_ei, tindices_wo_noninf)
-        write_pickle(tmask_wo_noninf, fp_tmask_wo_noninf)
-
-    # STEP 2: Run local SA
-    ######################
-
-    # --> 2.1.1 Technosphere
-
-    const_factors = [1/const_factor, const_factor]
-    # 2.1.1 Ecoinvent
-    fp_tlocal_sa = write_dir / f"local_sa.tech.cutoff_{ctff:.0e}.maxcalc_{mclc:.0e}.pickle"
-    if fp_tlocal_sa.exists():
-        tlocal_sa = read_pickle(fp_tlocal_sa)
-    else:
-        tlocal_sa = run_local_sa_technosphere(
-            fu_mapped,
-            pkgs,
-            tdistributions_ei,
-            tmask_wo_noninf,
-            const_factors,
-            write_dir,
-            f"tech.cutoff_{ctff:.0e}.maxcalc_{mclc:.0e}",
-        )
-        write_pickle(tlocal_sa, fp_tlocal_sa)
-
-    # 2.4.2 Determine variance threshold
-
-    add_variances([tlocal_sa], static_score)
-    num_parameters = 10000
-    var_threshold = get_variance_threshold([tlocal_sa], num_parameters)
-
-    datapackages = {
-        "technosphere": {
-            "local_sa": tlocal_sa,
-            "indices": tindices_ei,
-        },
-    }
-
-    # 2.4.3 Construct masks for all inputs after local SA
-    count = 0
-    print(f"Selected {num_parameters} exchanges after local SA:")
-    for name, data in datapackages.items():
-        dtype = bwp.INDICES_DTYPE
-        is_params = False
-        indices_wo_lowinf = get_indices_high_variance(data['local_sa'], var_threshold)
-        mask_wo_lowinf = get_mask(data["indices"], indices_wo_lowinf, is_params)
-        data['indices_wo_lowinf'] = np.array(indices_wo_lowinf, dtype=dtype)
-        data['mask_wo_lowinf'] = mask_wo_lowinf
-        print(f"    {mask_wo_lowinf.sum():5d} from {name}")
-        count += mask_wo_lowinf.sum()
-
-    # # 2.5 --> Validation of results after local SA
-    #
-    # viterations = 30
-    vseed = 22222000
-    tname = "technosphere"
-    #
-    # from akula.background import generate_validation_datapackages
-    # tmask = datapackages[tname]["mask_wo_lowinf"]
-    # tdp_vall, tdp_vinf = generate_validation_datapackages(
-    #     demand, tname, tindices_ei, tmask, num_samples=viterations, seed=vseed
-    # )
-    # datapackages[tname]['local_sa.validation_all'] = tdp_vall
-    # datapackages[tname]['local_sa.validation_inf'] = tdp_vinf
-    #
-    # # 2.6.1 All inputs vary
-    # print("computing all scores")
-    # lca_all = bc.LCA(
-    #     fu_mapped,
-    #     data_objs=pkgs + [tdp_vall],
-    #     use_distributions=False,
-    #     use_arrays=True,
-    #     seed_override=vseed,
-    # )
-    # lca_all.lci()
-    # lca_all.lcia()
-    # scores_all = [lca_all.score for _, _ in zip(lca_all, range(viterations))]
-    #
-    # # 2.6.1 Only influential inputs vary
-    # print("computing inf scores")
-    # lca_inf = bc.LCA(
-    #     fu_mapped,
-    #     data_objs=pkgs + [tdp_vinf],
-    #     use_distributions=False,
-    #     use_arrays=True,
-    #     seed_override=vseed,
-    # )
-    # lca_inf.lci()
-    # lca_inf.lcia()
-    # scores_inf = [lca_inf.score for _, _ in zip(lca_inf, range(viterations))]
-    #
-    # masks_dict_all = {
-    #     tname: np.ones(len(datapackages[tname]["mask_wo_lowinf"]), dtype=bool),
-    # }
-    # masks_dict_inf = {
-    #     tname: datapackages[tname]["mask_wo_lowinf"],
-    # }
-    #
-    # offset_all = get_lca_score_shift(demand, masks_dict_all, shift_median=False)
-    # offset_inf = get_lca_score_shift(demand, masks_dict_inf, shift_median=False)
-    # print(offset_all, offset_inf)
-    #
-    # Y1 = np.array(scores_all) - offset_all
-    # Y2 = np.array(scores_inf) - offset_inf
-    #
-    # fig = plot_correlation_Y1_Y2(
-    #     Y1,
-    #     Y2,
-    #     start=0,
-    #     end=50,
-    #     trace_name1="All vary",
-    #     trace_name2="Only inf vary"
-    # )
-    # fig.add_trace(
-    #     go.Scatter(
-    #         x=[0],
-    #         y=[static_score],
-    #         mode='markers',
-    #         marker=dict(color='black', symbol='x')
-    #     )
-    # )
-    # fig.show()
-
-    # STEP 3: Run high-dimensional screening
-    ########################################
+import bw2data as bd

-    # 3.1.1. Create background datapackages for Xgboost
-    from akula.background import create_background_datapackage
-    xiterations = 5000
-    random_seeds = [71, 72, 73, 74]
-    for random_seed in random_seeds:
-        print(random_seed)
-        for bg_name in [tname]:
-            print(bg_name)
-            indices = datapackages[bg_name]["indices_wo_lowinf"]
-            dp = create_background_datapackage(
-                demand, bg_name, f"tech-{bg_name}-{random_seed}", indices, num_samples=xiterations, seed=random_seed,
-            )
-            dp.finalize_serialization()
+bd.projects.set_current("GSA for archetypes")

-    # fp_name = "tech-technosphere-61 copy.zip"
-    # dp_copy = bwp.load_datapackage(ZipFS(str(DATA_DIR / "xgboost" / fp_name)))
+path = Path("/Users/akim/Documents/paper 3, env research letters/")
+ifp = path / "gsa_ranking_independent.xlsx"
+cfp = path / "gsa_ranking_correlated.xlsx"
+idp = pd.read_excel(ifp, skiprows=[1])
+cdp = pd.read_excel(cfp, skiprows=[1])

-    print("-----------------")
+modules = list(set(cdp['module']))
+ilist = idp[["input_id", "output_id"]].values
+clist = cdp[["input_id", "output_id"]].values
+ituple = [tuple(el) for el in ilist]
+ctuple = [tuple(el) for el in clist]
+iio = np.array(ituple, dtype=bwp.INDICES_DTYPE)
+cio = np.array(ctuple, dtype=bwp.INDICES_DTYPE)

-    # 3.1.2. MC simulations for XGBoost
-    dps_xgboost_names = list(datapackages)
-    for random_seed in random_seeds:
-        print(f"MC random seed {random_seed}")
-        fp_xgboost = write_dir / f"mc.tech.xgboost.{xiterations}.{random_seed}.pickle"
-        if fp_xgboost.exists():
-            scores_xgboost = read_pickle(fp_xgboost)
-        else:
-            dps_xgboost = []
-            for dp_name in dps_xgboost_names:
-                dp_temp = bwp.load_datapackage(ZipFS(str(DATA_DIR / "xgboost" / f"tech-{dp_name}-{random_seed}.zip")))
-                dps_xgboost.append(dp_temp)
+idata = {}
+cdata = {}
+for m in modules:
+    iwhere = np.where(idp['module'].values == m)[0]
+    idata[m] = iio[iwhere]
+    cwhere = np.where(cdp['module'].values == m)[0]
+    cdata[m] = cio[cwhere]
+    intersection = np.intersect1d(idata[m], cdata[m]).shape[0]
+    i_minus_c = np.setdiff1d(idata[m], cdata[m]).shape[0]
+    c_minus_i = np.setdiff1d(cdata[m], idata[m]).shape[0]
+    print(m, i_minus_c, intersection, c_minus_i)
+print("")

-            lca_xgboost = bc.LCA(
-                fu_mapped,
-                data_objs=pkgs + dps_xgboost,
-                use_distributions=False,
-                use_arrays=True,
-                seed_override=vseed,
-            )
-            lca_xgboost.lci()
-            lca_xgboost.lcia()
-            scores_xgboost = [lca_xgboost.score for _, _ in zip(lca_xgboost, range(xiterations))]
-            print(scores_xgboost)
-            write_pickle(scores_xgboost, fp_xgboost)

-    print("")
+# import bw2data as bd
+# import bw2calc as bc
+# import bw_processing as bwp
+# from fs.zipfs import ZipFS
+# from akula.markets import DATA_DIR
+# from gsa_framework.utils import read_pickle
+#
+#
+# bd.projects.set_current("GSA for archetypes")
+# dp = bwp.load_datapackage(ZipFS(str(DATA_DIR / "xgboost" / "implicit-markets-81.zip")))
+# fp_markets = DATA_DIR / "implicit-markets.pickle"
+#
+# markets = read_pickle(fp_markets)
+#
+#
+# print("")
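Note: the new ranking comparison in draft.py leans on NumPy set operations over structured arrays — because bwp.INDICES_DTYPE is a structured (row, col) dtype, np.intersect1d and np.setdiff1d match whole exchange index pairs at once. A self-contained illustration with invented pairs:

    import numpy as np
    import bw_processing as bwp

    a = np.array([(1, 10), (2, 20), (3, 30)], dtype=bwp.INDICES_DTYPE)
    b = np.array([(2, 20), (4, 40)], dtype=bwp.INDICES_DTYPE)

    print(np.intersect1d(a, b))         # exchanges ranked by both -> [(2, 20)]
    print(np.setdiff1d(a, b).shape[0])  # ranked only in `a` -> 2
    print(np.setdiff1d(b, a).shape[0])  # ranked only in `b` -> 1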
diff --git a/dev/paper3_plot_electricity.py b/dev/paper3_plot_electricity.py
index dfebfaa..6e44f33 100644
--- a/dev/paper3_plot_electricity.py
+++ b/dev/paper3_plot_electricity.py
@@ -51,15 +51,16 @@ def get_one(db_name, **kwargs):
     iterations = 2000

     # Static values, Ecoinvent
-    lca = bc.LCA(
-        demand=fu_mapped,
-        data_objs=pkgs,
-        use_arrays=False,
-        use_distributions=False
-    )
-    lca.lci()
-    lca.lcia()
-    static_ecoinvent = deepcopy(lca.score)
+    # lca = bc.LCA(
+    #     demand=fu_mapped,
+    #     data_objs=pkgs,
+    #     use_arrays=False,
+    #     use_distributions=False
+    # )
+    # lca.lci()
+    # lca.lcia()
+    # static_ecoinvent = deepcopy(lca.score)
+    static_ecoinvent = 0.048644
     print(static_ecoinvent)

     # Static values, Entso
@@ -125,7 +126,8 @@ def get_one(db_name, **kwargs):
     color_darkgray_rgb = f"rgb(72, 80, 99, {opacity})"
     color_black_hex = "#212931"
     color_pink_rgb = f"rgba(148, 52, 110, {opacity})"
-    color_bright_pink_rgb = "#ff44cc"
+    color_bright_pink_rgb = "#e75480"  # "#ff44cc"
+    color_psi_lpurple = "#914967"

     Y_ecoinvent = scores_ecoinvent
     Y_entso = scores_entso
@@ -146,7 +148,7 @@ def get_one(db_name, **kwargs):
         hist_data=[Y_ecoinvent, Y_entso],
         group_labels=group_labels,
         bin_size=.005,
-        colors=[color_darkgray_rgb, color_pink_rgb],
+        colors=[color_darkgray_rgb, color_psi_lpurple],
     )

     fig.add_trace(
@@ -180,7 +182,7 @@ def get_one(db_name, **kwargs):
     )

     fig.update_xaxes(
-        title_text=r"$\text{GWP of Swiss low voltage electricity}$",
+        title_text=r"$\text{LCIA scores, [kg CO}_2\text{-eq.]}$",
         showgrid=True,
         gridwidth=1,
         gridcolor=color_gray_hex,
@@ -227,7 +229,7 @@ def get_one(db_name, **kwargs):

     fig.show()

-    fig.write_image(write_dir / f"_fig_ch_low_voltage_{option}.png")
+    fig.write_image(write_dir / f"_figure_1_ch_low_voltage_{option}.eps")
diff --git a/dev/paper3_plot_electricity_dirichlet.py b/dev/paper3_plot_electricity_dirichlet.py
index 90bb528..4220d46 100644
--- a/dev/paper3_plot_electricity_dirichlet.py
+++ b/dev/paper3_plot_electricity_dirichlet.py
@@ -9,7 +9,7 @@
 from copy import deepcopy

 # Local files
-from akula.electricity.create_datapackages import DATA_DIR
+from akula.markets import DATA_DIR

 color_gray_hex = "#b2bcc0"
 color_darkgray_hex = "#485063"
@@ -21,10 +21,12 @@
 color_psi_brown = "#85543a"
 color_psi_green = "#82911a"
 color_psi_blue = "#003b6e"
+color_psi_lblue = "#415483"
 color_psi_yellow = "#fdca00"
 color_psi_purple = "#7c204e"
+color_psi_lpurple = "#914967"
 color_psi_dgreen = "#197418"
-opacity = 0.95
+opacity = 0.9
 num_bins = 100

 widths = [580, 400, 1400]
@@ -37,8 +39,8 @@
     8310: 21.75257995687919,  # high voltage
 }

-plot_lognormal = False
-plot_dirichlet = False
+plot_lognormal = True
+plot_dirichlet = True
 plot_zoomed = True

 dirichlet_scales = {
@@ -194,6 +196,7 @@
                 name=r"$\text{Dirichlet samples}$",
                 showlegend=showlegend,
                 opacity=opacity,
+                marker=dict(opacity=opacity),
                 line=dict(color=color_psi_dgreen, width=1, shape="hvh"),
                 fill="tozeroy",
                 legendrank=3,
@@ -209,6 +212,7 @@
                 name=r"$\text{ENTSO-E reported data}$",
                 showlegend=showlegend,
                 opacity=opacity,
+                marker=dict(opacity=opacity),
                 line=dict(color=color_darkgray_hex, width=1, shape="hvh"),
                 fill="tozeroy",
                 legendrank=1,
@@ -346,7 +350,7 @@
             )
         )
     )
-    fig.write_image(write_figs / f"_fig_dirichlet_validation_{plot_lognormal}_{plot_dirichlet}_{plot_zoomed}.pdf")
+    fig.write_image(write_figs / f"_figure_4_dirichlet_validation_{plot_lognormal}_{plot_dirichlet}_{plot_zoomed}.eps")

-    # fig.show()
+    fig.show()
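Note: both plotting scripts above overlay sampled score distributions; paper3_plot_electricity.py in particular builds its histogram overlay with plotly's distplot figure factory, which the `colors=[...]` hunk feeds. A standalone sketch with random stand-in scores and invented labels:

    import numpy as np
    import plotly.figure_factory as ff

    scores_ecoinvent = np.random.normal(0.049, 0.004, 2000)  # stand-in MC scores
    scores_entso = np.random.normal(0.051, 0.003, 2000)

    fig = ff.create_distplot(
        hist_data=[scores_ecoinvent, scores_entso],
        group_labels=["ecoinvent", "ENTSO-E"],
        bin_size=.005,
        colors=["#485063", "#914967"],  # darkgray / color_psi_lpurple from the patch
    )
    fig.show()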
diff --git a/dev/run_local_sa.py b/dev/run_local_sa.py
index 9f541d6..ebc7c22 100644
--- a/dev/run_local_sa.py
+++ b/dev/run_local_sa.py
@@ -7,6 +7,7 @@
 from copy import deepcopy
 from gsa_framework.utils import read_pickle, write_pickle
 from gsa_framework.visualization.plotting import plot_correlation_Y1_Y2
+import plotly.graph_objects as go

 # Local files
 from akula.sensitivity_analysis.local_sensitivity_analysis import (
@@ -27,23 +28,17 @@
     project = 'GSA for archetypes'
     bd.projects.set_current(project)
     const_factor = 10
-    ctff = 1e-6  # Cutoff for contribution analysis
-    mclc = 1e10  # Maximum number of computations for supply chain traversal
+    ctff = 1e-9  # Cutoff for contribution analysis
+    mclc = 1e20  # Maximum number of computations for supply chain traversal

     # Setups
     ########

-    co = bd.Database('swiss consumption 1.0')
-    # fu = [act for act in co if "ch hh average consumption aggregated, years 151617" == act['name']][0]
-
-    ei = bd.Database("ecoinvent 3.8 cutoff")
-    # fu = {act: 1/3 for act in ei if "market group for electricity" in act['name'] and "RER" in act['location']}
-
-    fu = [act for act in co if "Food" in act['name']][0]
+    co_db = bd.Database('swiss consumption 1.0')
+    fu = [act for act in co_db if "ch hh average consumption aggregated, years 151617" == act['name']][0]

     write_dir = Path("write_files") / project.lower().replace(" ", "_") \
         / fu['name'].lower().replace(" ", "_").replace(",", "")
-    # write_dir = Path("write_files") / project.lower().replace(" ", "_") / "eu_electricity"
     write_dir_sct = write_dir / "supply_chain_traversal"
     write_dir_sct.mkdir(exist_ok=True, parents=True)

@@ -69,16 +64,16 @@
     tflip_ei = tei.get_resource('ecoinvent_3.8_cutoff_technosphere_matrix.flip')[0]
     tdistributions_ei = tei.get_resource('ecoinvent_3.8_cutoff_technosphere_matrix.distributions')[0]

-    # # Biosphere
-    # bei = ei.filter_by_attribute('matrix', 'biosphere_matrix')
-    # bindices = bei.get_resource('ecoinvent_3.8_cutoff_biosphere_matrix.indices')[0]
-    # bdata = bei.get_resource('ecoinvent_3.8_cutoff_biosphere_matrix.data')[0]
-    # bdistributions = bei.get_resource('ecoinvent_3.8_cutoff_biosphere_matrix.distributions')[0]
-    #
-    # # Characterization
-    # cindices = cf.get_resource('IPCC_2013_climate_change_GWP_100a_uncertain_matrix_data.indices')[0]
-    # cdata = cf.get_resource('IPCC_2013_climate_change_GWP_100a_uncertain_matrix_data.data')[0]
-    # cdistributions = cf.get_resource('IPCC_2013_climate_change_GWP_100a_uncertain_matrix_data.distributions')[0]
+    # Biosphere
+    bei = ei.filter_by_attribute('matrix', 'biosphere_matrix')
+    bindices = bei.get_resource('ecoinvent_3.8_cutoff_biosphere_matrix.indices')[0]
+    bdata = bei.get_resource('ecoinvent_3.8_cutoff_biosphere_matrix.data')[0]
+    bdistributions = bei.get_resource('ecoinvent_3.8_cutoff_biosphere_matrix.distributions')[0]
+
+    # Characterization
+    cindices = cf.get_resource('IPCC_2013_climate_change_GWP_100a_uncertain_matrix_data.indices')[0]
+    cdata = cf.get_resource('IPCC_2013_climate_change_GWP_100a_uncertain_matrix_data.data')[0]
+    cdistributions = cf.get_resource('IPCC_2013_climate_change_GWP_100a_uncertain_matrix_data.distributions')[0]

     # STEP 1: Remove non influential with contribution analysis
     ############################################################
@@ -98,23 +93,23 @@
         tmask_wo_noninf = get_mask(tindices_ei, tindices_wo_noninf)
         write_pickle(tmask_wo_noninf, fp_tmask_wo_noninf)

-    # # Step 1.2 Biosphere
-    # bindices_wo_noninf = get_bindices_wo_noninf(lca)
-    # fp_bmask_wo_noninf = write_dir / "mask.bio.without_noninf.pickle"
-    # if fp_bmask_wo_noninf.exists():
-    #     bmask_wo_noninf = read_pickle(fp_bmask_wo_noninf)
-    # else:
-    #     bmask_wo_noninf = get_mask(bindices, bindices_wo_noninf)
-    #     write_pickle(bmask_wo_noninf, fp_bmask_wo_noninf)
-    #
-    # # Step 1.3 Characterization
-    # cindices_wo_noninf = get_cindices_wo_noninf(lca)
-    # fp_cmask_wo_noninf = write_dir / "mask.cf.without_noninf.pickle"
-    # if fp_cmask_wo_noninf.exists():
-    #     cmask_wo_noninf = read_pickle(fp_cmask_wo_noninf)
-    # else:
-    #     cmask_wo_noninf = get_mask(cindices, cindices_wo_noninf)
-    #     write_pickle(cmask_wo_noninf, fp_cmask_wo_noninf)
+    # Step 1.2 Biosphere
+    bindices_wo_noninf = get_bindices_wo_noninf(lca)
+    fp_bmask_wo_noninf = write_dir / "mask.bio.without_noninf.pickle"
+    if fp_bmask_wo_noninf.exists():
+        bmask_wo_noninf = read_pickle(fp_bmask_wo_noninf)
+    else:
+        bmask_wo_noninf = get_mask(bindices, bindices_wo_noninf)
+        write_pickle(bmask_wo_noninf, fp_bmask_wo_noninf)
+
+    # Step 1.3 Characterization
+    cindices_wo_noninf = get_cindices_wo_noninf(lca)
+    fp_cmask_wo_noninf = write_dir / "mask.cf.without_noninf.pickle"
+    if fp_cmask_wo_noninf.exists():
+        cmask_wo_noninf = read_pickle(fp_cmask_wo_noninf)
+    else:
+        cmask_wo_noninf = get_mask(cindices, cindices_wo_noninf)
+        write_pickle(cmask_wo_noninf, fp_cmask_wo_noninf)

     # STEP 2: Run local SA
     ######################
@@ -225,40 +220,40 @@
     #     write_pickle(elocal_sa, fp_elocal_sa)

     # --> 2.2.1 Biosphere, 12'400 exchanges
-    # fp_blocal_sa = write_dir / f"local_sa.bio.pickle"
-    # if fp_blocal_sa.exists():
-    #     blocal_sa = read_pickle(fp_blocal_sa)
-    # else:
-    #     blocal_sa = run_local_sa(
-    #         "biosphere",
-    #         fu_mapped,
-    #         pkgs,
-    #         bindices,
-    #         bdata,
-    #         bdistributions,
-    #         bmask_wo_noninf,
-    #         None,
-    #         const_factor,
-    #     )
-    #     write_pickle(blocal_sa, fp_blocal_sa)
-    #
-    # # --> 2.3.1 Characterization, 77 exchanges
-    # fp_clocal_sa = write_dir / "local_sa.cf.pickle"
-    # if fp_clocal_sa.exists():
-    #     clocal_sa = read_pickle(fp_clocal_sa)
-    # else:
-    #     clocal_sa = run_local_sa(
-    #         "characterization",
-    #         fu_mapped,
-    #         pkgs,
-    #         cindices,
-    #         cdata,
-    #         cdistributions,
-    #         cmask_wo_noninf,
-    #         None,
-    #         const_factor,
-    #     )
-    #     write_pickle(clocal_sa, fp_clocal_sa)
+    fp_blocal_sa = write_dir / f"local_sa.bio.pickle"
+    if fp_blocal_sa.exists():
+        blocal_sa = read_pickle(fp_blocal_sa)
+    else:
+        blocal_sa = run_local_sa(
+            "biosphere",
+            fu_mapped,
+            pkgs,
+            bindices,
+            bdata,
+            bdistributions,
+            bmask_wo_noninf,
+            None,
+            const_factor,
+        )
+        write_pickle(blocal_sa, fp_blocal_sa)
+
+    # --> 2.3.1 Characterization, 77 exchanges
+    fp_clocal_sa = write_dir / "local_sa.cf.pickle"
+    if fp_clocal_sa.exists():
+        clocal_sa = read_pickle(fp_clocal_sa)
+    else:
+        clocal_sa = run_local_sa(
+            "characterization",
+            fu_mapped,
+            pkgs,
+            cindices,
+            cdata,
+            cdistributions,
+            cmask_wo_noninf,
+            None,
+            const_factor,
+        )
+        write_pickle(clocal_sa, fp_clocal_sa)

     # 2.4 --> Remove lowly influential based on variance
     datapackages = {
@@ -291,14 +286,14 @@
         indices = dp.get_resource(f'{rg_name}.indices')[0]
         if type_ == "tech":
             pop_indices_from_dict(indices, tlocal_sa)
-        # elif type_ == "bio":
-        #     pop_indices_from_dict(indices, blocal_sa)
+        elif type_ == "bio":
+            pop_indices_from_dict(indices, blocal_sa)

     # 2.4.2 Determine variance threshold
     local_sa_list = [
         tlocal_sa,
-        # blocal_sa,
-        # clocal_sa,
+        blocal_sa,
+        clocal_sa,
         # mlocal_sa,
         # plocal_sa,
         # flocal_sa,
@@ -307,12 +302,12 @@
     add_variances(local_sa_list, static_score)

     # tlocal_sa_all = {**tlocal_sa, **mlocal_sa, **flocal_sa, **elocal_sa}
-    tlocal_sa_all = {**tlocal_sa}  # TODO change!!
+    tlocal_sa_all = {**tlocal_sa}
     # assert len(tlocal_sa) + len(mlocal_sa) + len(flocal_sa) + len(elocal_sa) == len(tlocal_sa_all)
-    #
-    num_parameters = 10000
-    # local_sa_list = [tlocal_sa_all, blocal_sa, clocal_sa, plocal_sa]  # TODO change
-    local_sa_list = [tlocal_sa_all]
+
+    num_parameters = 15000
+    # local_sa_list = [tlocal_sa_all, blocal_sa, clocal_sa, plocal_sa]
+    local_sa_list = [tlocal_sa_all, blocal_sa, clocal_sa]
     var_threshold = get_variance_threshold(local_sa_list, num_parameters)

     datapackages.update(
@@ -321,14 +316,14 @@
             "local_sa": tlocal_sa,
             "indices": tindices_ei,
         },
-        # "biosphere": {
-        #     "local_sa": blocal_sa,
-        #     "indices": bindices,
-        # },
-        # "characterization": {
-        #     "local_sa": clocal_sa,
-        #     "indices": cindices
-        # },
+        "biosphere": {
+            "local_sa": blocal_sa,
+            "indices": bindices,
+        },
+        "characterization": {
+            "local_sa": clocal_sa,
+            "indices": cindices
+        },
         # "implicit-markets": {
         #     "local_sa": mlocal_sa,
         #     "indices": mindices,
@@ -376,37 +371,37 @@

     # 2.5 --> Validation of results after local SA

-    viterations = 30
-    vseed = 22222000
-
-    # # 2.5.1 Technosphere
+    # viterations = 30
+    # vseed = 22222000
+    #
+    # # # 2.5.1 Technosphere
     # from akula.background import generate_validation_datapackages
     tname = "technosphere"
     # tmask = datapackages[tname]["mask_wo_lowinf"]
     # tdp_vall, tdp_vinf = generate_validation_datapackages(
-    #     tname, tindices_ei, tmask, num_samples=viterations, seed=vseed
+    #     demand, tname, tindices_ei, tmask, num_samples=viterations, seed=vseed
     # )
     # datapackages[tname]['local_sa.validation_all'] = tdp_vall
     # datapackages[tname]['local_sa.validation_inf'] = tdp_vinf
     #
-    # 2.5.2 Biosphere
-    # bname = "biosphere"
+    # # 2.5.2 Biosphere
+    bname = "biosphere"
     # bmask = datapackages[bname]["mask_wo_lowinf"]
     # bdp_vall, bdp_vinf = generate_validation_datapackages(
-    #     bname, bindices, bmask, num_samples=viterations, seed=vseed
+    #     demand, bname, bindices, bmask, num_samples=viterations, seed=vseed
     # )
     # datapackages[bname]['local_sa.validation_all'] = bdp_vall
     # datapackages[bname]['local_sa.validation_inf'] = bdp_vinf
     #
     # # # 2.5.3 Characterization
-    # cname = "characterization"
+    cname = "characterization"
     # cmask = datapackages[cname]["mask_wo_lowinf"]
     # cdp_vall, cdp_vinf = generate_validation_datapackages(
-    #     cname, cindices, cmask, num_samples=viterations, seed=vseed
+    #     demand, cname, cindices, cmask, num_samples=viterations, seed=vseed
     # )
     # datapackages[cname]['local_sa.validation_all'] = cdp_vall
     # datapackages[cname]['local_sa.validation_inf'] = cdp_vinf
-
+    #
     # # 2.5.4 Markets
     # from akula.markets import generate_validation_datapackages
     # mname = "implicit-markets"
@@ -431,7 +426,7 @@
     # print("computing all scores")
     # lca_all = bc.LCA(
     #     fu_mapped,
-    #     data_objs=pkgs + [tdp_vall],
+    #     data_objs=pkgs + [tdp_vall, bdp_vall, cdp_vall, mdp_vall],
     #     use_distributions=False,
     #     use_arrays=True,
     #     seed_override=vseed,
@@ -444,7 +439,7 @@
     # print("computing inf scores")
     # lca_inf = bc.LCA(
     #     fu_mapped,
-    #     data_objs=pkgs + [tdp_vinf],
+    #     data_objs=pkgs + [tdp_vinf, bdp_vinf, cdp_vinf, mdp_vinf],
     #     use_distributions=False,
     #     use_arrays=True,
     #     seed_override=vseed,
@@ -455,17 +450,17 @@
     #
     # masks_dict_all = {
     #     tname: np.ones(len(datapackages[tname]["mask_wo_lowinf"]), dtype=bool),
-    #     # bname: np.ones(len(datapackages[bname]["mask_wo_lowinf"]), dtype=bool),
-    #     # cname: np.ones(len(datapackages[cname]["mask_wo_lowinf"]), dtype=bool),
+    #     bname: np.ones(len(datapackages[bname]["mask_wo_lowinf"]), dtype=bool),
+    #     cname: np.ones(len(datapackages[cname]["mask_wo_lowinf"]), dtype=bool),
     # }
     # masks_dict_inf = {
     #     tname: datapackages[tname]["mask_wo_lowinf"],
-    #     # bname: datapackages[bname]["mask_wo_lowinf"],
-    #     # cname: datapackages[cname]["mask_wo_lowinf"],
+    #     bname: datapackages[bname]["mask_wo_lowinf"],
+    #     cname: datapackages[cname]["mask_wo_lowinf"],
     # }
     #
-    # offset_all = get_lca_score_shift(masks_dict_all, shift_median=False)
-    # offset_inf = get_lca_score_shift(masks_dict_inf, shift_median=False)
+    # offset_all = get_lca_score_shift(demand, masks_dict_all, shift_median=False)
+    # offset_inf = get_lca_score_shift(demand, masks_dict_inf, shift_median=False)
     # print(offset_all, offset_inf)
     #
     # Y1 = np.array(scores_all) - offset_all
@@ -489,37 +484,41 @@
     #     )
     # fig.show()

-    # print("")
+    print("")

     # STEP 3: Run high-dimensional screening
     ########################################

     # 3.1.1. Create background datapackages for Xgboost
     from akula.background import create_background_datapackage
-    xiterations = 5000
-    random_seeds = [61, 62, 63, 64, 65, 66, 67, 68]
-    random_seeds = [61]
-    for random_seed in random_seeds:
-        print(random_seed)
-        for bg_name in [tname]:
-            print(bg_name)
-            indices = datapackages[bg_name]["indices_wo_lowinf"]
-            dp = create_background_datapackage(
-                demand, bg_name, f"tech-{bg_name}-{random_seed}", indices, num_samples=xiterations, seed=random_seed,
-            )
-            dp.finalize_serialization()
-
-    fp_name = "tech-technosphere-61 copy.zip"
-    dp_read = bwp.load_datapackage(ZipFS(str(DATA_DIR / "xgboost" / fp_name)))
-
-    print("-----------------")
+    xiterations = 15000
+    # random_seeds = [94]
+    # for random_seed in random_seeds:
+    #     print(random_seed)
+    #     for bg_name in [tname]:
+    #         print(bg_name)
+    #         indices = datapackages[bg_name]["indices_wo_lowinf"]
+    #         dp = create_background_datapackage(
+    #             demand, bg_name, f"{bg_name}-{random_seed}", indices, num_samples=xiterations, seed=random_seed,
+    #         )
+    #         dp.finalize_serialization()
+    #
+    # print("-----------------")

     # 3.1.2. MC simulations for XGBoost
-    dps_xgboost_names = list(datapackages)
-    random_seeds = [777]
+    dps_xgboost_names = [
+        "technosphere",
+        "biosphere",
+        "characterization",
+        # "implicit-markets",
+        # "ecoinvent-parameterization",
+        # "liquid-fuels-kilogram",
+        # "entso-average",
+    ]
+    random_seeds = [91, 92, 93, 94, 95, 96]
     for random_seed in random_seeds:
         print(f"MC random seed {random_seed}")
-        fp_xgboost = write_dir / f"mc.tech.xgboost.{xiterations}.{random_seed}.pickle"
+        fp_xgboost = write_dir / f"mc.base.xgboost.{xiterations}.{random_seed}.pickle"
         if fp_xgboost.exists():
             scores_xgboost = read_pickle(fp_xgboost)
         else:
@@ -529,7 +528,7 @@
                 dp_name = "entso-timeseries"
             if dp_name == "ecoinvent-parameterization":
                 dp_name = "ecoinvent-parameterization-exchanges"
-            dp_temp = bwp.load_datapackage(ZipFS(str(DATA_DIR / "xgboost" / f"tech-{dp_name}-{random_seed}.zip")))
+            dp_temp = bwp.load_datapackage(ZipFS(str(DATA_DIR / "xgboost" / f"{dp_name}-{random_seed}.zip")))
             dps_xgboost.append(dp_temp)

         lca_xgboost = bc.LCA(
@@ -537,11 +536,10 @@
             data_objs=pkgs + dps_xgboost,
             use_distributions=False,
             use_arrays=True,
-            seed_override=vseed,
         )
         lca_xgboost.lci()
         lca_xgboost.lcia()
-        scores_xgboost = [lca_xgboost.score for _, _ in zip(lca_xgboost, range(xiterations))]
+        scores_xgboost = [lca_xgboost.score] + [lca_xgboost.score for _, _ in zip(lca_xgboost, range(xiterations-1))]
         print(scores_xgboost)
         write_pickle(scores_xgboost, fp_xgboost)
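Note on the last hunk: with use_arrays=True the LCA object is an iterator over presampled columns, and the initial lci()/lcia() call already scores the first column; advancing the iterator before reading a score would skip it. That appears to be why the collection changed to [lca_xgboost.score] plus xiterations - 1 further draws. The pattern as a reusable sketch (argument names follow the script; they come from bd.prepare_lca_inputs and the loaded datapackages):

    import bw2calc as bc

    def monte_carlo_scores(fu_mapped, pkgs, dps, iterations):
        """Collect `iterations` LCIA scores from presampled datapackages.

        The first lci/lcia call scores column 0, so only iterations - 1
        additional draws are taken while iterating the LCA object.
        """
        lca = bc.LCA(
            fu_mapped,
            data_objs=pkgs + dps,
            use_distributions=False,
            use_arrays=True,
        )
        lca.lci()
        lca.lcia()
        return [lca.score] + [lca.score for _, _ in zip(lca, range(iterations - 1))]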
diff --git a/dev/setac32_plot_combustion.py b/dev/setac32_plot_combustion.py
index 1049d4e..112fbe8 100644
--- a/dev/setac32_plot_combustion.py
+++ b/dev/setac32_plot_combustion.py
@@ -9,8 +9,8 @@
 # Local files
 from akula.combustion import DATA_DIR

-plot_fig1 = False
-plot_fig2 = True
+plot_fig1 = True
+plot_fig2 = False

 if __name__ == "__main__":
@@ -34,8 +34,8 @@
     opacity = 0.6
     num_bins = 100

-    # ind = 27
-    ind = 403
+    ind = 27
+    # ind = 403
     activity = bd.get_activity(int(bindices[ind]['col']))

     if plot_fig1:
@@ -134,9 +134,9 @@
         plot_bgcolor="rgba(255,255,255,1)",
     )

-    fig.write_image(write_figs / f"{bindices[ind]['row']}_{bindices[ind]['col']}_carbon_balance.pdf")
+    # fig.write_image(write_figs / f"{bindices[ind]['row']}_{bindices[ind]['col']}_carbon_balance.pdf")

-    # fig.show()
+    fig.show()

     if plot_fig2:
         ei = bd.Database("ecoinvent 3.8 cutoff")
diff --git a/dev/setac32_plot_markets.py b/dev/setac32_plot_markets.py
index 962337c..c40a964 100644
--- a/dev/setac32_plot_markets.py
+++ b/dev/setac32_plot_markets.py
@@ -198,7 +198,7 @@
         plot_bgcolor="rgba(255,255,255,1)",
     )

-    fig.write_image(write_figs / f"{act_id}_implicit_market.pdf")
-    # fig.show()
+    # fig.write_image(write_figs / f"{act_id}_implicit_market.eps")
+    fig.show()

     print("")
diff --git a/dev/setac32_plot_monte_carlo.py b/dev/setac32_plot_monte_carlo.py
index 77a4f26..20c60b9 100644
--- a/dev/setac32_plot_monte_carlo.py
+++ b/dev/setac32_plot_monte_carlo.py
@@ -2,15 +2,17 @@
 from pathlib import Path
 import numpy as np
 from gsa_framework.utils import read_pickle
-from gsa_framework.visualization.plotting import plot_correlation_Y1_Y2
+import plotly.graph_objects as go
+from plotly.subplots import make_subplots
+# from gsa_framework.visualization.plotting import plot_correlation_Y1_Y2

 from akula.markets import DATA_DIR


 # option = "implicit-markets"
 # option = "liquid-fuels-kilogram"
-option = "ecoinvent-parameterization"
-# option = "entso-timeseries"
+# option = "ecoinvent-parameterization"
+option = "entso-timeseries"

 color_gray_hex = "#b2bcc0"
 color_darkgray_hex = "#485063"
@@ -30,6 +32,116 @@
 opacity = 0.9
 num_bins = 100

+
+def plot_correlation_Y1_Y2(
+    Y1,
+    Y2,
+    start=0,
+    end=50,
+    trace_name1="Y1",
+    trace_name2="Y2",
+    trace_name3="Scatter plot",
+    color1="#636EFA",
+    color2="#EF553B",
+    color3="#A95C9A",
+    xaxes1_title_text=None,
+    yaxes1_title_text="Values",
+    xaxes2_title_text="Values",
+    yaxes2_title_text="Values",
+    showtitle=True,
+):
+    """Function that plots subset of datapoints of ``Y1`` and ``Y2``, used by Validation class."""
+    x = np.arange(start, end)
+    fig = make_subplots(
+        rows=1,
+        cols=2,
+        shared_xaxes=False,
+    )
+    fig.add_trace(
+        go.Scatter(
+            x=x,
+            y=Y1[start:end] - Y2[start:end],
+            name=trace_name1,
+            mode="markers",
+            marker=dict(color=color1, symbol='diamond-tall', size=8),
+            showlegend=True,
+        ),
+        row=1,
+        col=1,
+    )
+    fig.add_trace(
+        go.Scatter(
+            x=Y1,
+            y=Y2,
+            name=trace_name3,
+            mode="markers",
+            marker=dict(
+                color=color3,
+                line=dict(
+                    width=1,
+                    # color="#782e69",
+                    color="black",
+                ),
+            ),
+            showlegend=True,
+            opacity=0.65,
+        ),
+        row=1,
+        col=2,
+    )
+    fig.update_layout(
+        width=800,
+        height=220,
+        legend=dict(x=0.4, y=1.0),  # on top
+        xaxis1=dict(domain=[0.0, 0.63]),
+        xaxis2=dict(domain=[0.78, 1.0]),
+        margin=dict(l=20, r=20, t=40, b=20),
+    )
+    if xaxes1_title_text is None:
+        text = "Subset of {0}/{1} datapoints".format(end - start, Y1.shape[0])
+        xaxes1_title_text = text
+    fig.update_xaxes(
+        title_text=xaxes1_title_text,
+        row=1,
+        col=1,
+    )
+    Ymin = min(np.hstack([Y1, Y2]))
+    Ymax = max(np.hstack([Y1, Y2]))
+    fig.update_yaxes(range=[Ymin, Ymax], title_text=yaxes1_title_text, row=1, col=1)
+
+    fig.update_xaxes(
+        range=[Ymin, Ymax],
+        title_text=xaxes2_title_text,
+        # color=color1,
+        row=1,
+        col=2,
+    )
+    fig.update_yaxes(
+        range=[Ymin, Ymax],
+        title_text=yaxes2_title_text,
+        # color=color2,
+        row=1,
+        col=2,
+    )
+    if showtitle:
+        from scipy.stats import spearmanr
+
+        pearson_coef = np.corrcoef([Y1, Y2])[0, -1]
+        spearman_coef, _ = spearmanr(Y1, Y2)
+        fig.update_layout(
+            title=dict(
+                text="Pearson = {:4.3f}, Spearman = {:4.3f}".format(
+                    pearson_coef, spearman_coef
+                ),
+                font=dict(
+                    size=14,
+                ),
+            )
+        )
+
+    return fig
+
+
 if __name__ == "__main__":

     project = "GSA for archetypes"
@@ -49,7 +161,7 @@
     Yoption = read_pickle(fp_monte_carlo_option)

     # Plot histograms
-    ex_offset = 660
+    ex_offset = 1756.551 - 1135.223913835494
     Y1 = np.array(Ybase) + ex_offset
     Y2 = np.array(Yoption) + ex_offset

@@ -60,7 +172,7 @@
     Y1 = Y1[mask]
     Y2 = Y2[mask]

-    trace_name1 = r"$\text{Independent sampling}$"
+    trace_name1 = r"$\text{Difference between independent and correlated sampling}$"
     if option == "liquid-fuels-kilogram":
         use_option = "carbon balancing"
     elif option == "entso-timeseries":
@@ -73,8 +185,10 @@
     #     use_option = option

     trace_name2 = r'$\text{Sampling with ' + f'{use_option.replace("-", " ")}' + '}$'
+    trace_name3 = r'$\text{Scatter plot data points}$'

     lcia_text = r"$\text{LCIA scores, [kg CO}_2\text{-eq]}$"
+    lcia_text2 = r"$\Delta \text{ LCIA scores, [kg CO}_2\text{-eq]}$"

     fig = plot_correlation_Y1_Y2(
         Y1,
@@ -83,7 +197,8 @@
         end=50,
         trace_name1=trace_name1,
         trace_name2=trace_name2,
-        yaxes1_title_text=lcia_text,
+        trace_name3=trace_name3,
+        yaxes1_title_text=lcia_text2,
         xaxes2_title_text=lcia_text,
         yaxes2_title_text=lcia_text,
         showtitle=False,
@@ -106,7 +221,6 @@
         linecolor=color_gray_hex,
     )
     fig.update_yaxes(
-        range=(1700, 2050),
         title_standoff=7,
         showgrid=True,
         gridwidth=1,
@@ -118,10 +232,12 @@
         linewidth=1,
         linecolor=color_gray_hex,
     )
-    fig.update_xaxes(range=(1700, 2050), col=2)
+    fig.update_xaxes(range=(1680, 2020), col=2)
+    fig.update_yaxes(range=(-160, 160), col=1)
+    fig.update_yaxes(range=(1680, 2020), col=2)
     fig.update_layout(
         width=700,
-        height=220,
+        height=190,
         legend=dict(
             yanchor="bottom",
             y=1.1,
@@ -131,10 +247,11 @@
             font=dict(size=13),
             bordercolor=color_darkgray_hex,
             borderwidth=1,
+            itemsizing="constant",
         ),
-        margin=dict(t=60, b=10, l=10, r=10),
+        margin=dict(t=40, b=10, l=10, r=10),
         paper_bgcolor="rgba(255,255,255,1)",
         plot_bgcolor="rgba(255,255,255,1)",
     )
-    fig.write_image(write_figs / f"mc.{option}.{iterations}.{seed}.pdf")
+    fig.write_image(write_figs / f"mc.{option}.{iterations}.{seed}.eps")
     # fig.show()
diff --git a/setup.cfg b/setup.cfg
index 8d6f2d5..762fbe3 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,7 +5,7 @@

 [metadata]
 name = akula
-version = 0.1.dev2
+version = 0.1.dev3
 description = Add a short description here!
 author = aleksandra-kim, cmutel
 author_email = aleksandra.kim@icloud.com