Update 3d small-scale rotating drum example and postprocessing (#1383)
* Update small-scale-rotating-drum example doc

* Fix save vtu functionality of lethe-pyvista-tools and update example accordingly

* Final details on documentation and doc bug fix

* Small fixes in the small 3d rotating drum documentation

* Remove duplicated reference to fix warning

---------

Co-authored-by: Oreste Marquis <[email protected]>
voferreira and OresteMarquis authored Nov 22, 2024
1 parent f8be426 commit fe3cfb5
Showing 14 changed files with 124 additions and 70 deletions.
2 changes: 1 addition & 1 deletion contrib/postprocessing/example.py
@@ -37,7 +37,7 @@
# .list_pvtu and reads the '.pvtu' files inside the pointed folder as pyvista
# dataframes.
print('List of all .pvtu: ')
print(example.list_vtu)
print(example.list_pvtu)
print('Time list, if transient: ')
print(example.time_list)
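For context, a minimal sketch of how this example script is typically driven; the case path and file names below are illustrative assumptions, not taken from the repository:

from lethe_pyvista_tools import lethe_pyvista_tools

# Construct the post-processing object (paths and file names are assumptions)
example = lethe_pyvista_tools(case_path='.',
                              prm_file_name='small-rotating-drum-dem.prm',
                              pvd_name='out.pvd')

print(example.list_pvtu)  # names of the .pvtu files referenced by the .pvd
print(example.list_vtu)   # matching .vtu names, used when prefixed copies are written
print(example.time_list)  # output times, if the simulation is transient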

29 changes: 19 additions & 10 deletions contrib/postprocessing/lethe_pyvista_tools/__init__.py
@@ -5,6 +5,7 @@
# Import modules
import pyvista as pv
from tqdm import tqdm
import os

# Define class:
class lethe_pyvista_tools():
@@ -66,7 +67,9 @@ def __init__(self, case_path = '.', prm_file_name = '', pvd_name = '', prefix =
self.time_list -> Returns the list of times corresponding to
datasets.
self.list_vtu -> Returns the list of names of .pvtu files.
self.list_pvtu -> Returns the list of names of .pvtu files.
self.list_vtu -> Returns the list of names of .vtu files for copy purposes.
self.padding -> Returns the padding of pvtu file numbering.
@@ -82,8 +85,7 @@ def __init__(self, case_path = '.', prm_file_name = '', pvd_name = '', prefix =
self.padding = '0'

if n_procs is None:
from os import cpu_count
self.n_procs = cpu_count()
self.n_procs = 1
else:
self.n_procs = n_procs

@@ -162,6 +164,9 @@ def __init__(self, case_path = '.', prm_file_name = '', pvd_name = '', prefix =
else:
self.prm_dict[clean_line[0]] = clean_line[1]

if 'output file name' not in self.prm_dict.keys():
self.prm_dict['output file name'] = 'out'

print(f'Successfully constructed. To see the .prm dictionary, print($NAME.prm_dict)')

# Define path where pvtu files are
@@ -177,27 +182,31 @@ def __init__(self, case_path = '.', prm_file_name = '', pvd_name = '', prefix =
self.time_list = self.reader.time_values

# Create a list of all files' names
list_vtu = [pvd_datasets[x].path for x in range(len(pvd_datasets))]
list_pvtu = [pvd_datasets[x].path for x in range(len(pvd_datasets))]

# Remove duplicates
list_vtu = list(dict.fromkeys(list_vtu))
list_pvtu = list(dict.fromkeys(list_pvtu))

# Select data
if last is None:
self.list_vtu = list_vtu[first::step]
self.list_pvtu = list_pvtu[first::step]
self.time_list = self.time_list[first::step]
self.first = first
self.step = step
self.last = len(self.time_list) - 1
self.pvd_datasets = self.reader.datasets[first::step]
else:
self.list_vtu = list_vtu[first:last:step]
self.list_pvtu = list_pvtu[first:last:step]
self.time_list = self.time_list[first:last:step]
self.first = first
self.step = step
self.last = last
self.pvd_datasets = self.reader.datasets[first:last:step]

#Define list of VTU files
list_vtu = self.list_pvtu.copy()
self.list_vtu = [x.replace('.pvtu', '.00000.vtu') for x in list_vtu]

if len(prefix) > 0:
self.create_copy(prefix = prefix)

@@ -216,17 +225,17 @@ def __init__(self, case_path = '.', prm_file_name = '', pvd_name = '', prefix =
self.df = []

# Read PVTU data
n_pvtu = len(self.list_vtu)
n_pvtu = len(self.list_pvtu)
pbar = tqdm(total = n_pvtu, desc="Reading PVTU files")
for i in range(len(self.list_vtu)):
for i in range(len(self.list_pvtu)):

# Read dataframes from VTU files into df
self.df.append(self.get_df)
pbar.update(1)

self.df_available = True

print(f'Written .df[timestep] from timestep = 0 to timestep = {len(self.list_vtu)-1}')
print(f'Written .df[timestep] from timestep = 0 to timestep = {len(self.list_pvtu)-1}')

# IMPORT FUNCTIONS:

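As a note on the renaming above, a tiny stand-alone sketch of how the constructor now derives the .vtu names from the .pvtu names; the fixed '.00000' piece index assumes single-subdomain output and the file names are illustrative:

# Derive .vtu names from .pvtu names, as done at the end of __init__
list_pvtu = ['out.0.pvtu', 'out.1.pvtu', 'out.2.pvtu']
list_vtu = [name.replace('.pvtu', '.00000.vtu') for name in list_pvtu]
print(list_vtu)  # ['out.0.00000.vtu', 'out.1.00000.vtu', 'out.2.00000.vtu']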
28 changes: 23 additions & 5 deletions contrib/postprocessing/lethe_pyvista_tools/_create_copy.py
@@ -12,16 +12,16 @@ def create_copy(self, prefix):
for line in pvd_in:

# If line refers to a dataset
if "vtu" in line:
if "pvtu" in line:

# For all read files
for path in read_files_path_list:

# If line matches one of the files
if path in line:

# If vtu is in list_vtu
if line.split('file="')[1].split('"/>')[0] in self.list_vtu:
# If vtu is in list_pvtu
if line.split('file="')[1].split('"/>')[0] in self.list_pvtu:
line = line.replace('file="', f'file="{prefix}')
pvd_out.write(line)
read_files_path_list.remove(path)
@@ -32,18 +32,36 @@ def create_copy(self, prefix):
pvd_out.write(line)

# Make a copy of VTU files
n_vtu = len(self.list_vtu)
n_vtu = len(self.list_pvtu)
pbar = tqdm(total = n_vtu, desc="Writing modified VTU and PVD files")
new_list_pvtu = []
new_list_vtu = []
for i in range(len(self.list_vtu)):
for i in range(len(self.list_pvtu)):
new_vtu_reference = F' <Piece Source="{prefix}{self.list_vtu[i]}"/>\n'

# Copy file
shutil.copy2(f'{self.path_output}/{self.list_pvtu[i]}', f'{self.path_output}/{prefix}{self.list_pvtu[i]}')

# Change reference to VTU file in PVTU file
pvtu_content = []
with open(f'{self.path_output}/{prefix}{self.list_pvtu[i]}') as pvtu_in:
pvtu_content = pvtu_in.readlines()

with open(f'{self.path_output}/{prefix}{self.list_pvtu[i]}', 'w') as pvtu_out:
for line in pvtu_content:
if f'<Piece Source="' in line.strip():
line = new_vtu_reference
pvtu_out.write(line)

shutil.copy2(f'{self.path_output}/{self.list_vtu[i]}', f'{self.path_output}/{prefix}{self.list_vtu[i]}')

# Append to list of names of VTU files
new_list_pvtu.append(f'{prefix}{self.list_pvtu[i]}')
new_list_vtu.append(f'{prefix}{self.list_vtu[i]}')
pbar.update(1)


# Fix name of PVD and PVTU files
self.pvd_name = prefix + self.pvd_name
self.list_pvtu = new_list_pvtu
self.list_vtu = new_list_vtu
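A stand-alone sketch of the rewrite create_copy now applies to each copied .pvtu so that the copy points at the prefixed .vtu piece; the file names are illustrative assumptions:

prefix = 'mod_'
vtu_name = 'out.0.00000.vtu'  # illustrative piece name
new_piece = f'    <Piece Source="{prefix}{vtu_name}"/>\n'

# Read the copied .pvtu and redirect its piece reference to the prefixed .vtu
with open(f'{prefix}out.0.pvtu') as pvtu_in:
    content = pvtu_in.readlines()

with open(f'{prefix}out.0.pvtu', 'w') as pvtu_out:
    for line in content:
        if '<Piece Source="' in line:
            line = new_piece
        pvtu_out.write(line)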
@@ -69,8 +69,8 @@ def get_cylindrical_coords_loop(i):
df['points_cyl'][:, 0] = radius.tolist()
df['points_cyl'][:, 1] = theta
df['points_cyl'][:, 2] = z
df.save(f'{self.path_output}/{self.list_vtu[i]}')
df.save(f'{self.path_output}/{self.list_vtu[i]}', binary = False)

self.parallel_run(get_cylindrical_coords_loop, range(len(self.list_vtu)), tqdm_desc = "Getting cylindrical coords")
self.parallel_run(get_cylindrical_coords_loop, range(len(self.list_pvtu)), tqdm_desc = "Getting cylindrical coords")

self.has_cylindrical_coords = True
5 changes: 4 additions & 1 deletion contrib/postprocessing/lethe_pyvista_tools/_get_df.py
@@ -17,4 +17,7 @@ def get_df(self, time_step):
for data in self.ignore_data:
pvtu_reader.disable_point_array(data)

return pvtu_reader.read()
data = pvtu_reader.read()
data = data.cast_to_unstructured_grid()

return data
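A sketch of what the modified get_df does for a single file, using pyvista's generic reader interface; the file name is an assumption:

import pyvista as pv

pvtu_reader = pv.get_reader('out.0.pvtu')  # illustrative file name
data = pvtu_reader.read()
data = data.cast_to_unstructured_grid()    # uniform dataset type for downstream filters
print(data.array_names)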
@@ -75,8 +75,8 @@ def get_nearest_neighbors_loop(i):
df["neighbors_id"] = df["ID"][indices]
df["neighbors"] = indices
df["neighbors_dist"] = dist
df.save(f'{self.path_output}/{self.list_vtu[i]}')
df.save(f'{self.path_output}/{self.list_vtu[i]}', binary = False)

self.parallel_run(get_nearest_neighbors_loop, range(len(self.list_vtu)), tqdm_desc = "Finding neighbors")
self.parallel_run(get_nearest_neighbors_loop, range(len(self.list_pvtu)), tqdm_desc = "Finding neighbors")

self.has_neighbors = True
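For reference, one illustrative way to produce the indices and dist arrays used in the loop above; this is not the module's implementation, SciPy's k-d tree is simply one option:

import numpy as np
from scipy.spatial import cKDTree

points = np.random.rand(1000, 3)          # illustrative particle positions
tree = cKDTree(points)
dist, indices = tree.query(points, k=15)  # 15 nearest neighbors, self included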
@@ -71,8 +71,8 @@ def mixing_index_doucet(self, reference_time_step = 0, use_cyl = False, increasi
id_keys = df["ID"]

# Create list of mixing indices per time-step and array of eigenvectors
self.mixing_index = Manager().list(np.empty(len(self.list_vtu)))
self.mixing_eigenvector = Manager().list(np.empty((len(self.list_vtu), 3)))
self.mixing_index = Manager().list(np.empty(len(self.list_pvtu)))
self.mixing_eigenvector = Manager().list(np.empty((len(self.list_pvtu), 3)))

# Loop through dataframes and find its mixing index
global mixing_index_doucet_loop
@@ -109,7 +109,7 @@ def mixing_index_doucet_loop(i):
self.mixing_index[i] = max_eigenvalue
self.mixing_eigenvector[i] = assoc_eigenvectors.flatten().tolist()

self.parallel_run(mixing_index_doucet_loop, range(len(self.list_vtu)), tqdm_desc = "Calculating mixing index")
self.parallel_run(mixing_index_doucet_loop, range(len(self.list_pvtu)), tqdm_desc = "Calculating mixing index")

# Fix eigenvector data structure
self.mixing_eigenvector = np.asarray(self.mixing_eigenvector)
@@ -52,8 +52,8 @@ def mixing_index_nearest_neighbors(self, n_neighbors = 15, reference_array = "pa
self.get_nearest_neighbors(n_neighbors = n_neighbors)

# Create empty list to store mixing_index per time-step
self.mixing_index = Manager().list(np.empty(len(self.list_vtu)))
self.mixing_index_std = Manager().list(np.empty(len(self.list_vtu)))
self.mixing_index = Manager().list(np.empty(len(self.list_pvtu)))
self.mixing_index_std = Manager().list(np.empty(len(self.list_pvtu)))

# Loop through dataframes and find its mixing index
global mixing_index_nearest_neighbors_loop
@@ -77,7 +77,7 @@ def mixing_index_nearest_neighbors_loop(i):
self.df[i][self.mixing_index_array_name] = mixing_index_per_particle
else:
df[self.mixing_index_array_name] = mixing_index_per_particle
df.save(f'{self.path_output}/{self.list_vtu[i]}')
df.save(f'{self.path_output}/{self.list_vtu[i]}', binary = False)

mixing_index = np.mean(mixing_index_per_particle)
mixing_index_std = np.std(mixing_index_per_particle)
Expand All @@ -86,5 +86,5 @@ def mixing_index_nearest_neighbors_loop(i):
self.mixing_index[i] = mixing_index
self.mixing_index_std[i] = mixing_index_std

self.parallel_run(mixing_index_nearest_neighbors_loop, range(len(self.list_vtu)), tqdm_desc = "Calculating mixing index")
self.parallel_run(mixing_index_nearest_neighbors_loop, range(len(self.list_pvtu)), tqdm_desc = "Calculating mixing index")

35 changes: 27 additions & 8 deletions contrib/postprocessing/lethe_pyvista_tools/_modify_array.py
@@ -83,20 +83,39 @@ def modify_array(self, reference_array_name = "ID", array_name = "new_array", re
if restart_array or array_name not in df.array_names:
# Create array if it does not exist
new_array = np.repeat(standard_value, len(df[reference_array_name]))

new_array = new_array.astype(np.float32)

print(f"Creating array '{array_name}' with standard_value {standard_value}")

# Push array to all pyvista arrays
global create_array
def create_array(i):
if self.df_available:
self.df[i][array_name] = np.repeat(standard_value, len(self.df[i][reference_array_name]))
self.df[i][array_name] = new_array

else:
df = self.get_df(i)
df[array_name] = np.repeat(standard_value, len(df[reference_array_name]))
df.save(f'{self.path_output}/{self.list_vtu[i]}')
df[array_name] = new_array
new_pdata_line = f' <DataArray type="Float32" Name="{array_name}" format="ascii"/>\n'
with open(f'{self.path_output}/{self.list_pvtu[i]}', 'r') as f:
# Read the file contents
lines = f.readlines()

# Find the closing tag of the PPointData section
for j, line in enumerate(lines):
if line.strip() == "</PPointData>":
# Insert the new line just before the closing tag
lines.insert(j, new_pdata_line)
break

self.parallel_run(create_array, range(len(self.list_vtu)), tqdm_desc = f"Creating array {array_name}")
# Write the updated contents back to the file
with open(f'{self.path_output}/{self.list_pvtu[i]}', 'w') as file:
file.writelines(lines)

df.save(f'{self.path_output}/{self.list_vtu[i]}', binary = False)

self.parallel_run(create_array, range(len(self.list_pvtu)), tqdm_desc = f"Creating array {array_name}")

else:
if self.df_available:
@@ -213,12 +232,12 @@ def modify_array_loop(i):
new_array[k] = eval(array_values)

# Assign new_array to pyvista dataframe
df[array_name] = new_array
df[array_name] = new_array.astype(np.float32)

if self.df_available:
self.df[i] = df
else:
df.save(f'{self.path_output}/{self.list_vtu[i]}')
df.save(f'{self.path_output}/{self.list_vtu[i]}', binary = False)

# If not time dependent, the condition and array_values will be applied
# at the reference_time_step.
Expand Down Expand Up @@ -289,7 +308,7 @@ def modify_array_loop(i):
self.df[reference_time_step][array_name] = new_array
else:
df_reference[array_name] = new_array
df_reference.save(f'{self.path_output}/{self.list_vtu[reference_time_step]}')
df_reference.save(f'{self.path_output}/{self.list_vtu[reference_time_step]}', binary = False)

# Create dictionary (map) based on reference_array
reference_time_step_dict = dict(zip(df_reference[reference_array_name], df_reference[array_name]))
@@ -318,4 +337,4 @@ def modify_array_loop(i):
df[array_name][indices] = itemgetter(*keys)(reference_time_step_dict)
df.save(f'{self.path_output}/{self.list_vtu[i]}')

self.parallel_run(modify_array_loop, range(len(self.list_vtu)), tqdm_desc = "Assigning array")
self.parallel_run(modify_array_loop, range(len(self.list_pvtu)), tqdm_desc = "Assigning array")
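The key addition above also declares the new array in the parallel (.pvtu) header so readers know every piece carries it. A minimal stand-alone sketch of that patch, with illustrative array and file names:

array_name = 'new_array'
declaration = f'      <DataArray type="Float32" Name="{array_name}" format="ascii"/>\n'

with open('out.0.pvtu') as f:
    lines = f.readlines()

# Insert the declaration just before the closing tag of the PPointData section
for j, line in enumerate(lines):
    if line.strip() == '</PPointData>':
        lines.insert(j, declaration)
        break

with open('out.0.pvtu', 'w') as f:
    f.writelines(lines)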
4 changes: 2 additions & 2 deletions contrib/postprocessing/lethe_pyvista_tools/_sort_by_array.py
@@ -28,9 +28,9 @@ def sort_by_array_loop(i):
for name in df.array_names:
df[name] = df[name][df[reference_array_name].argsort()]

df.save(f'{self.path_output}/{self.list_vtu[i]}')
df.save(f'{self.path_output}/{self.list_vtu[i]}', binary = False)

self.parallel_run(sort_by_array_loop, range(len(self.list_vtu)),
self.parallel_run(sort_by_array_loop, range(len(self.list_pvtu)),
tqdm_desc=f"Sorting dataframe by {reference_array_name}")

self.sorted = True
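A tiny stand-alone illustration of the sorting step above: every array is reordered by the argsort of the reference array (the values are made up):

import numpy as np

df = {'ID': np.array([2, 0, 1]), 'velocity': np.array([0.2, 0.0, 0.1])}
order = df['ID'].argsort()     # sorting permutation from the reference array
for name in df:
    df[name] = df[name][order]
print(df['ID'], df['velocity'])  # [0 1 2] [0.  0.1 0.2]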
@@ -31,8 +31,8 @@ def write_df_to_pvtu(self, prefix = "mod_"):
# If line matches one of the files
if path in line:

# If pvtu is in list_vtu
if line.split('file="')[1].split('"/>')[0] in self.list_vtu:
# If pvtu is in list_pvtu
if line.split('file="')[1].split('"/>')[0] in self.list_pvtu:
line = line.replace('file="', f'file="{prefix}')
pvd_out.write(line)
read_files_path_list.remove(path)
@@ -48,7 +48,7 @@ def write_df_to_pvtu(self, prefix = "mod_"):
N_pvtu = len(self.df)
pbar = tqdm(total = N_pvtu, desc="Writing new PVTU and PVD files")
for i in range(len(self.df)):
self.df[i].save(f'{self.path_output}/{prefix}{self.list_vtu[i]}')
self.df[i].save(f'{self.path_output}/{prefix}{self.list_vtu[i]}', binary = False)
pbar.update(1)

print(f"Modified .pvtu and .pvd files with prefix {prefix} successfully written")
@@ -12,7 +12,7 @@ This is an example of how to post-process results obtained in the `Small scale r

.. warning::

Details about installing the module or using it without installing it are available on this `documentation <../../../tools/postprocessing/postprocessing.py>`_.
Details about installing the module or using it without installing it are available `here <../../../tools/postprocessing/postprocessing.py>`_.


----------------------------------
@@ -51,7 +51,7 @@ The DEM files used in this example are obtained following the `Small scale rotat
Python Code
---------------

Please, read this `documentation <../../../tools/postprocessing/postprocessing.py>`_ before jumping to the following steps.
Please, read this `documentation <../../../tools/postprocessing/postprocessing_pyvista>`_ before jumping to the following steps.

Constructing the Object
~~~~~~~~~~~~~~~~~~~~~~~~~~~~