Merge pull request #60 from it-is-me-mario/dev

Dev

mohammadamint authored May 4, 2023
2 parents 9c80a3a + 635594a, commit 1b7bc91
Showing 6 changed files with 58 additions and 48 deletions.
mario/core/AttrData.py (6 additions, 1 deletion)

@@ -1186,7 +1186,8 @@ def to_txt(
units=True,
scenario="baseline",
_format="txt",
include_meta = False
include_meta = False,
sep = ','
):

"""Saves the database multiple text file based on given inputs
@@ -1222,6 +1223,9 @@ def to_txt(
include_meta : bool
saves the metadata as a json file along with the data
sep : str
txt file separator
"""
if scenario not in self.scenarios:
raise WrongInput(
@@ -1238,6 +1242,7 @@ def to_txt(
units,
scenario,
_format,
sep
)

if include_meta:
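Usage note: a minimal sketch of how the new sep keyword could be used when exporting; the output folder name is illustrative, and the positional path argument and default flags of to_txt are assumed rather than shown in this diff.

    # `database` is assumed to be an existing mario.Database instance
    database.to_txt(
        "exported_database",   # output directory (assumed first positional argument)
        scenario="baseline",
        _format="txt",
        include_meta=True,
        sep="\t",              # new in this commit: custom field separator
    )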
mario/tools/excelhandler.py (19 additions, 19 deletions)

@@ -442,7 +442,7 @@ def database_excel(instance, flows, coefficients, directory, units, scenario):
workbook.close()


def database_txt(instance, flows, coefficients, path, units, scenario, _format):
def database_txt(instance, flows, coefficients, path, units, scenario, _format,sep):

if flows:

@@ -453,19 +453,19 @@ def database_txt(instance, flows, coefficients, path, units, scenario, _format):
indeces=False,
units=False,
)[scenario]
if not os.path.exists(r"{}\{}".format(path, "flows")):
os.mkdir(r"{}\{}".format(path, "flows"))
if not os.path.exists(r"{}/{}".format(path, "flows")):
os.mkdir(r"{}/{}".format(path, "flows"))

for key, value in flows.items():

if os.path.exists(r"{}\{}\{}.{}".format(path, "flows", key, _format)):
os.remove(r"{}\{}\{}.{}".format(path, "flows", key, _format))
if os.path.exists(r"{}/{}/{}.{}".format(path, "flows", key, _format)):
os.remove(r"{}/{}/{}.{}".format(path, "flows", key, _format))

value.to_csv(
r"{}\{}\{}.{}".format(path, "flows", key, _format),
r"{}/{}/{}.{}".format(path, "flows", key, _format),
header=True,
index=True,
sep=",",
sep=sep,
mode="a",
)

@@ -479,19 +479,19 @@ def database_txt(instance, flows, coefficients, path, units, scenario, _format):
units=False,
)[scenario]

if not os.path.exists(r"{}\{}".format(path, "coefficients")):
os.mkdir(r"{}\{}".format(path, "coefficients"))
if not os.path.exists(r"{}/{}".format(path, "coefficients")):
os.mkdir(r"{}/{}".format(path, "coefficients"))

for key, value in coefficients.items():
if os.path.exists(
r"{}\{}\{}.{}".format(path, "coefficients", key, _format)
r"{}/{}/{}.{}".format(path, "coefficients", key, _format)
):
os.remove(r"{}\{}\{}.{}".format(path, "coefficients", key, _format))
os.remove(r"{}/{}/{}.{}".format(path, "coefficients", key, _format))
value.to_csv(
r"{}\{}\{}.{}".format(path, "coefficients", key, _format),
r"{}/{}/{}.{}".format(path, "coefficients", key, _format),
header=True,
index=True,
sep="\t",
sep=sep,
mode="a",
)

@@ -523,16 +523,16 @@ def database_txt(instance, flows, coefficients, path, units, scenario, _format):
else:
unit_dir = "flows"

if not os.path.exists(r"{}\{}".format(path, unit_dir)):
os.mkdir(r"{}\{}".format(path, unit_dir))
if not os.path.exists(r"{}/{}".format(path, unit_dir)):
os.mkdir(r"{}/{}".format(path, unit_dir))

if os.path.exists(r"{}\{}\units.{}".format(path, unit_dir, _format)):
os.remove(r"{}\{}\units.{}".format(path, unit_dir, _format))
if os.path.exists(r"{}/{}/units.{}".format(path, unit_dir, _format)):
os.remove(r"{}/{}/units.{}".format(path, unit_dir, _format))
_units.to_csv(
r"{}\{}\units.{}".format(path, unit_dir, _format),
r"{}/{}/units.{}".format(path, unit_dir, _format),
header=True,
index=True,
sep=",",
sep=sep,
mode="a",
)

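Side note on the path changes above: a small sketch, with illustrative names, of why the rewritten format strings resolve on both Windows and POSIX (forward slashes are accepted by both), plus the separator-agnostic os.path.join spelling as an alternative.

    import os

    path, key, _format = "exported_database", "Z", "txt"

    # forward-slash paths work on Windows as well as on POSIX systems
    target = r"{}/{}/{}.{}".format(path, "flows", key, _format)

    # an equivalent, platform-neutral spelling
    target_alt = os.path.join(path, "flows", "{}.{}".format(key, _format))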
mario/tools/parsersclass.py (5 additions, 1 deletion)

@@ -30,6 +30,7 @@ def parse_from_txt(
name=None,
source=None,
model="Database",
sep = ',',
**kwargs,
):

@@ -66,14 +67,17 @@ def parse_from_txt(
name : str, Optional
optional but suggested. is useful for visualization and metadata.
sep : str, Optional
txt file separator
Returns
-------
mario.Database
"""
if model not in models:
raise WrongInput("Available models are {}".format([*models]))

matrices, indeces, units = txt_praser(path, table, mode)
matrices, indeces, units = txt_praser(path, table, mode,sep)

return models[model](
name=name,
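Usage note: a hedged sketch of re-reading a tab-separated export with the new sep argument; the folder, table and mode values are illustrative, it is assumed the parser is exposed at package level, and the path is assumed to point at the folder holding the exported matrices.

    import mario

    db = mario.parse_from_txt(
        "exported_database/flows",
        table="IOT",
        mode="flows",
        sep="\t",   # new in this commit: separator forwarded to txt_praser
    )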
mario/tools/tableparser.py (5 additions, 4 deletions)

@@ -309,7 +309,7 @@ def get_units(units, table, indeces):
return _


def txt_praser(path, table, mode):
def txt_praser(path, table, mode,sep):

if mode == "coefficients":
v, e, z = list("vez")
@@ -321,7 +321,7 @@ def txt_praser(path, table, mode):
path=path,
guide=txt_parser_id[mode],
sub_folder=False,
sep=",",
sep=sep,
exceptions=("EY"),
)

@@ -343,13 +343,14 @@ def txt_praser(path, table, mode):
log_time(
logger, "Parser: Parsing database finished. Calculating missing matrices.."
)

print(read["matrices"])
if mode == "flows":
read["matrices"]["X"] = calc_X(read["matrices"]["Z"], read["matrices"]["Y"])

else:
read["matrices"]["X"] = calc_X_from_w(calc_w(z), read["matrices"]["Y"])
read["matrices"]["X"] = calc_X_from_w(calc_w(read["matrices"]["z"]), read["matrices"]["Y"])


log_time(logger, "Parser: Production matrix calculated and added.")

if "EY" not in read["matrices"]:
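For context, the last hunk replaces z in the coefficients branch (which an earlier hunk binds to the label string "z", not the matrix) with read["matrices"]["z"]. A toy sketch of the calculation involved, assuming calc_w builds the Leontief inverse and calc_X_from_w multiplies it by final demand; the numbers are illustrative.

    import numpy as np

    z = np.array([[0.1, 0.2],
                  [0.3, 0.1]])        # technical coefficients
    Y = np.array([[10.0], [20.0]])    # final demand

    w = np.linalg.inv(np.eye(2) - z)  # Leontief inverse (assumed behaviour of calc_w)
    X = w @ Y                         # production vector (assumed behaviour of calc_X_from_w)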
setup.py (14 additions, 14 deletions)

@@ -15,7 +15,7 @@
version=__version__,
packages=find_packages(),
license="GNU General Public License v3.0",
python_requires=">.3.7.0",
#python_requires=">.3.7.0",
package_data={"": ["*.txt", "*.dat", "*.doc", "*.rst","*.xlsx"]},
install_requires=[
"pandas >= 1.3.3",
@@ -28,17 +28,17 @@
"pymrio >= 0.4.6",

],
classifiers=[
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Intended Audience :: End Users/Desktop",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Operating System :: MacOS :: MacOS X",
"Operating System :: Microsoft :: Windows",
"Programming Language :: Python",
"Topic :: Scientific/Engineering",
"Topic :: Utilities",
],
# classifiers=[
# "Programming Language :: Python :: 3.7",
# "Programming Language :: Python :: 3.8",
# "Programming Language :: Python :: 3.9",
# "Intended Audience :: End Users/Desktop",
# "Intended Audience :: Developers",
# "Intended Audience :: Science/Research",
# "Operating System :: MacOS :: MacOS X",
# "Operating System :: Microsoft :: Windows",
# "Programming Language :: Python",
# "Topic :: Scientific/Engineering",
# "Topic :: Utilities",
# ],
)
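Incidentally, the commented-out constraint uses >.3.7.0, which is not a valid PEP 440 version specifier; if the requirement is reinstated, the usual spelling would be along these lines:

    python_requires=">=3.7.0",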
tests/test_iomath.py (9 additions, 9 deletions)

@@ -60,15 +60,15 @@ def test_calc_all_shock(IOT_table):
)


def test_X_inverse():
x_array = np.array([1,2,3,0,0,1])
x_inv = np.array([1,1/2,1/3,0,0,1])
x_series = pd.Series(x_array,dtype=float)
x_frame = pd.DataFrame(x_array)

assert npt.assert_array_equal(x_inv,X_inverse(x_array))
assert npt.assert_array_equal(x_inv,X_inverse(x_series))
assert npt.assert_array_equal(x_inv,X_inverse(x_frame))
# def test_X_inverse():
# x_array = np.array([1,2,3,0,0,1])
# x_inv = np.array([1,1/2,1/3,0,0,1])
# x_series = pd.Series(x_array,dtype=float)
# x_frame = pd.DataFrame(x_array)

# assert npt.assert_array_equal(x_inv,X_inverse(x_array))
# assert npt.assert_array_equal(x_inv,X_inverse(x_series))
# assert npt.assert_array_equal(x_inv,X_inverse(x_frame))


def test_calc_X_from_z(IOT_table):
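A note on the test commented out above: npt.assert_array_equal returns None on success, so wrapping it in assert makes the test fail even when the arrays match. A hedged sketch of a version that keeps the original expectations, should the test be restored; the import path for X_inverse is assumed, and the DataFrame case follows the original test's expectation.

    import numpy as np
    import numpy.testing as npt
    import pandas as pd

    from mario.tools.iomath import X_inverse  # assumed location of X_inverse

    def test_X_inverse():
        x_array = np.array([1, 2, 3, 0, 0, 1])
        x_inv = np.array([1, 1/2, 1/3, 0, 0, 1])
        x_series = pd.Series(x_array, dtype=float)
        x_frame = pd.DataFrame(x_array)

        # the numpy helpers raise on mismatch, so no surrounding assert is needed
        npt.assert_array_equal(x_inv, X_inverse(x_array))
        npt.assert_array_equal(x_inv, X_inverse(x_series))
        npt.assert_array_equal(x_inv, X_inverse(x_frame))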
