diff --git a/.gitignore b/.gitignore index 82490b68..a0b61c9f 100644 --- a/.gitignore +++ b/.gitignore @@ -5,8 +5,13 @@ __pycache__/ # Temporary output files *.swp *.swo +*.swn *.txt *.out +*.log + +# Generated Python Package files +*.egg-info/ # Files for MacOS and IDEs .DS_store @@ -17,6 +22,8 @@ cmake-build-*/ # Generated SAM Graphs *.gv +venv/ + # Generated images *.svg *.png @@ -26,6 +33,13 @@ cmake-build-*/ *.csv *.json +# Tensor files +*.mtx +*.tns + +# Generated formatted tensor files +tensor_*_mode_* + # Generated folders build/ logs/ @@ -38,19 +52,20 @@ compiler/benchmark/ # Generated SAM simulator tests */sim/test/apps/test_*.py -*.gv - +# Tensor files *.mtx *.tns -# Temporary matrices +# Temporary or generated tensor directories tmp_mat*/ +tiles/ +synthetic/ -# Jupyter notebook checkpoints -.ipynb_checkpoints/ +# Generated SAM simulator tests +*/sim/test/apps/test_*.py -# Generated formatted tensor files -tensor_*_mode_* +# Temporary scripts +download_suitesparse_partial.sh -# Tensor files -tiles/ +# Network Filesystem +.nfs* diff --git a/Makefile b/Makefile index 0a4882a9..c55abca2 100644 --- a/Makefile +++ b/Makefile @@ -31,20 +31,20 @@ endif ifeq ("$(NEVA)","ON") CMD := OMP_PROC_BIND=true LD_LIBRARY_PATH=compiler/build/lib/:$(LD_LIBRARY_PATH) numactl -C 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15 -m 0 compiler/build/taco-bench $(BENCHFLAGS) - export SUITESPARSE_PATH=/nobackup/owhsu/sparse-datasets/suitesparse/ - export FROSTT_PATH=/nobackup/owhsu/sparse-datasets/frostt/ - export SUITESPARSE_FORMATTED_PATH=/nobackup/owhsu/sparse-datasets/suitesparse-formatted - export FROSTT_FORMATTED_TACO_PATH=/nobackup/owhsu/sparse-datasets/frostt-formatted/taco-tensor - export FROSTT_FORMATTED_PATH=/nobackup/owhsu/sparse-datasets/frostt-formatted - export TACO_TENSOR_PATH=/nobackup/owhsu/sparse-datasets + # export SUITESPARSE_PATH=/nobackup/owhsu/sparse-datasets/suitesparse/ + # export FROSTT_PATH=/nobackup/owhsu/sparse-datasets/frostt/ + # export SUITESPARSE_FORMATTED_PATH=/nobackup/owhsu/sparse-datasets/suitesparse-formatted + # export FROSTT_FORMATTED_TACO_PATH=/nobackup/owhsu/sparse-datasets/frostt-formatted/taco-tensor + # export FROSTT_FORMATTED_PATH=/nobackup/owhsu/sparse-datasets/frostt-formatted + # export TACO_TENSOR_PATH=/nobackup/owhsu/sparse-datasets else ifeq ("$(LANKA)", "ON") CMD := OMP_PROC_BIND=true LD_LIBRARY_PATH=compiler/build/lib/:$(LD_LIBRARY_PATH) numactl -C 0,2,4,6,8,10,24,26,28,30,32,34 -m 0 compiler/build/taco-bench $(BENCHFLAGS) - export SUITESPARSE_PATH=/data/scratch/changwan/florida_all - export FROSTT_PATH=/data/scratch/owhsu/datasets/frostt - export TACO_TENSOR_PATH=/data/scratch/owhsu/datasets - export SUITESPARSE_FORMATTED_PATH=/data/scratch/owhsu/datasets/suitesparse-formatted - export FROSTT_FORMATTED_TACO_PATH=/data/scratch/owhsu/datasets/frostt-formatted/taco-tensor - export FROSTT_FORMATTED_PATH=/data/scratch/owhsu/datasets/frostt-formatted + # export SUITESPARSE_PATH=/data/scratch/changwan/florida_all + # export FROSTT_PATH=/data/scratch/owhsu/datasets/frostt + # export TACO_TENSOR_PATH=/data/scratch/owhsu/datasets + # export SUITESPARSE_FORMATTED_PATH=/data/scratch/owhsu/datasets/suitesparse-formatted + # export FROSTT_FORMATTED_TACO_PATH=/data/scratch/owhsu/datasets/frostt-formatted/taco-tensor + # export FROSTT_FORMATTED_PATH=/data/scratch/owhsu/datasets/frostt-formatted else CMD := LD_LIBRARY_PATH=compiler/build/lib/:$(LD_LIBRARY_PATH) compiler/build/taco-bench $(BENCHFLAGS) endif @@ -57,16 +57,16 @@ guard-%: # ---- Run SAM python simulator stuff ---- 
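+# NOTE: the helper scripts invoked below were reorganized into per-task
+# subdirectories: scripts/run_sam_sim/, scripts/gen_sam_apps/, and
+# scripts/formatting/.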
 csv:
-	scripts/pytest_suitesparse_with_benchmarks.sh
+	scripts/run_sam_sim/pytest_suitesparse_with_benchmarks.sh

run-final: submodules
-	./scripts/run_suitesparse_final.sh $(realpath ./scripts/tensor_names/$(TENSOR_TXT))
+	./scripts/run_sam_sim/run_suitesparse_final.sh $(realpath ./scripts/tensor_names/$(TENSOR_TXT))

run-gen: submodules
-	./scripts/run_suitesparse_generated.sh $(realpath ./scripts/tensor_names/$(TENSOR_TXT))
+	./scripts/run_sam_sim/run_suitesparse_generated.sh $(realpath ./scripts/tensor_names/$(TENSOR_TXT))

tests: sam
	mkdir -p sam/sim/test/apps
-	python scripts/test_generating_code.py
+	python scripts/gen_sam_apps/test_generating_code.py

# ---- Build taco and make sam graphs ----
.PHONY: sam
@@ -109,18 +109,10 @@ endif
# ---- Setup proper environment stuff ----
suitesparse-formats: guard-SUITESPARSE_FORMATTED_PATH guard-SUITESPARSE_PATH
	rm -rf ${SUITESPARSE_FORMATTED_PATH}/*
-	set -e && ./scripts/generate_suitesparse_formats.sh
+	set -e && ./scripts/formatting/generate_suitesparse_formats.sh

frostt-formats: taco/build guard-FROSTT_FORMATTED_PATH guard-FROSTT_PATH
-	./scripts/generate_frostt_formats.sh
-
-.PHONY: env
-env:
-	export SUITESPARSE_PATH=/nobackup/owhsu/sparse-datasets/suitesparse/
-	export FROSTT_PATH=/nobackup/owhsu/sparse-datasets/frostt/
-	export SUITESPARSE_FORMATTED_PATH=/nobackup/owhsu/sparse-datasets/suitesparse-formatted
-	export FROSTT_FORMATTED_TACO_PATH=/nobackup/owhsu/sparse-datasets/frostt-formatted/taco-tensor
-	export FROSTT_FORMATTED_PATH=/nobackup/owhsu/sparse-datasets/frostt-formatted
+	./scripts/formatting/generate_frostt_formats.sh

.PHONY: pydepends
pydepends:
diff --git a/compiler/baseline.cpp b/compiler/baseline.cpp
index 9919a762..f911d9b0 100644
--- a/compiler/baseline.cpp
+++ b/compiler/baseline.cpp
@@ -315,7 +315,8 @@ enum SuiteSparseOp {
   SDDMM = 4,
   MATTRANSMUL = 5,
   RESIDUAL = 6,
-  MMADD = 7
+  MMADD = 7,
+  MMMUL = 8
 };

 std::string opName(SuiteSparseOp op) {
@@ -341,6 +342,9 @@ std::string opName(SuiteSparseOp op) {
     case MMADD: {
       return "mmadd";
     }
+    case MMMUL: {
+      return "mmmul";
+    }
     default:
       return "";
   }
@@ -467,6 +471,13 @@ static void bench_suitesparse(benchmark::State &state, SuiteSparseOp op, int fil
       result(i, j) = ssTensor(i, j) + otherShifted(i, j);
       break;
     }
+    case MMMUL: {
+      result = Tensor<double>("result", ssTensor.getDimensions(), ssTensor.getFormat(), fill_value);
+
+      IndexVar i, j, k;
+      result(i, j) = ssTensor(i, j) * otherShifted(i, j);
+      break;
+    }
     case MATTRANSMUL: {
       result = Tensor<double>("result", {DIM1}, Format(Sparse), fill_value);
@@ -516,4 +527,5 @@ static void bench_suitesparse(benchmark::State &state, SuiteSparseOp op, int fil
 // TODO: need to fix for DCSC for this
 TACO_BENCH_ARGS(bench_suitesparse, mat_mattransmul, MATTRANSMUL);
 TACO_BENCH_ARGS(bench_suitesparse, matmul_spmm, SPMM);
+TACO_BENCH_ARGS(bench_suitesparse, mat_elemmul, MMMUL);
diff --git a/count_nnz_tiling.py b/count_nnz_tiling.py
new file mode 100644
index 00000000..c013fa4f
--- /dev/null
+++ b/count_nnz_tiling.py
@@ -0,0 +1,52 @@
+import glob
+import sys
+
+
+def count_nonzeros(matrix_values_file):
+    with open(matrix_values_file, 'r') as values_file:
+        matrix_values = [float(val) for val in values_file.readlines()]
+
+    nonzeros = sum(1 for val in matrix_values if val != 0)
+
+    return nonzeros
+
+
+tile_dirs = glob.glob("SPARSE_TESTS/MAT_TMP_DIR/tile*")
+num_tiles = len(tile_dirs)
+limit = 900
+print("there are ", num_tiles, "tiles")
+
+# tilesize = int(sys.argv[1])**2
+tot_num_nonzeros = 0
+for tile_num in range(0, num_tiles):
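+    # Budget check: each operand tile, and the B/C pair combined, must stay
+    # under `limit` nonzeros (assumed here to mirror the tiler's per-tile
+    # memory budget); otherwise the current tiling is rejected.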
+    tot_num_nonzeros = 0
+
+    tensor_C_values_file = f'SPARSE_TESTS/MAT_TMP_DIR/tile{tile_num}/tensor_C_mode_vals'
+
+    num_nonzeros = count_nonzeros(tensor_C_values_file)
+    tot_num_nonzeros += num_nonzeros
+    if num_nonzeros >= limit:
+        print("num_nonzeros: ", num_nonzeros)
+        print("error! too many nonzeros in INPUT matrices")
+        raise Exception
+
+    tensor_B_values_file = f'SPARSE_TESTS/MAT_TMP_DIR/tile{tile_num}/tensor_B_mode_vals'
+
+    num_nonzeros = count_nonzeros(tensor_B_values_file)
+    tot_num_nonzeros += num_nonzeros
+    if num_nonzeros >= limit:
+        print("num_nonzeros: ", num_nonzeros)
+        print("error! too many nonzeros in INPUT matrices")
+        raise Exception
+
+    if tot_num_nonzeros >= limit:
+        print("tot_num_nonzeros: ", tot_num_nonzeros)
+        print("error! too many nonzeros in matrices")
+        raise Exception
+
+print("all", num_tiles, "tiles fit under the limit of", limit, "nonzeros")
\ No newline at end of file
diff --git a/environment.yml b/environment.yml
index 991dd271..9b648d7c 100644
--- a/environment.yml
+++ b/environment.yml
@@ -1,4 +1,4 @@
-name: aha
+name: sam
 channels:
   - pytorch
   - anaconda
@@ -201,4 +201,4 @@ dependencies:
   - statsmodels==0.13.2
   - threadpoolctl==3.1.0
   - traitlets==5.3.0
-prefix: /home/oliviahsu/miniconda3/envs/aha
+prefix: ~/Anaconda3/envs/sam
diff --git a/find_max_tilesize.py b/find_max_tilesize.py
new file mode 100644
index 00000000..bab4549a
--- /dev/null
+++ b/find_max_tilesize.py
@@ -0,0 +1,78 @@
+import os
+
+
+def write_to_line(file_path, line_number, new_content):
+    with open(file_path, 'r') as file:
+        lines = file.readlines()
+
+    if line_number > len(lines) or line_number < 1:
+        # Line number is out of range
+        return
+
+    lines[line_number - 1] = new_content + '\n'
+
+    with open(file_path, 'w') as file:
+        file.writelines(lines)
+
+
+def check_keyword_in_output(command, keyword):
+    # Run the command and redirect its output to a file
+    os.system(f'{command} > output.txt')
+
+    # Read the contents of the file
+    with open('output.txt', 'r') as file:
+        output = file.read()
+
+    # Clean up the output file, then report whether the keyword appeared
+    os.remove('output.txt')
+    return keyword in output
+
+
+tile_size = 300
+step = 10
+
+for attempt in range(20):
+    print("********************")
+    print("tile size: ", tile_size)
+    print("step: ", step)
+
+    yaml_file = "sam/sim/src/tiling/memory_config_onyx.yaml"
+    mem_tile_line = f"Mem_tile_size: {tile_size}"
+    print(mem_tile_line)
+    write_to_line(yaml_file, 19, mem_tile_line)
+
+    run_setup_script = "python3 setup_tiling_mat.py > temp.txt"
+    os.system(run_setup_script)
+    print(run_setup_script)
+
+    run_tile_pairing = "python3 tile_pairing.py > temp.txt"
+    os.system(run_tile_pairing)
+    print(run_tile_pairing)
+
+    run_count = "python3 count_nnz_tiling.py"
+    print(run_count)
+
+    if not check_keyword_in_output(run_count, "error"):
+        tile_size += step
+        step *= 2
+    else:
+        print("****************Tile broken!")
+        tile_size -= step
+        step //= 2
+
+    if tile_size == 450:
+        break
+
+    if step == 0:
+        if attempt >= 15:
+            step = 10
+        else:
+            break
+
+print("max tile size: ", tile_size)
diff --git a/generate_spmv_sparsity_sweep.py b/generate_spmv_sparsity_sweep.py
new file mode 100644
index 00000000..3d2f00e0
--- /dev/null
+++ b/generate_spmv_sparsity_sweep.py
@@ -0,0 +1,164 @@
+# Script to generate random sparse matrices for an SpMV sparsity sweep
+# (seeded, so repeated runs produce the same matrices every time)
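+# Outputs, as written below: rand_matrixN.mtx (1-indexed "row col val" lines
+# under a "rows cols nnz" header), plus rand_matrixN.mat and rand_vectorN.mat
+# (a compressed .mat matrix and a dense all-ones vector for the SpMV run).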
+import numpy as np
+import random
+import os
+import scipy.io as sio
+import scipy.sparse as sps
+# from scipy.io import mmread
+
+# Set the seed value (previously seed_value = 42)
+seed_value = 100
+random.seed(seed_value)
+np.random.seed(seed_value)
+
+# Generate the matrix dimensions and store them in an array: two entries
+# (one matrix, two dimensions per matrix).
+
+# Conditions each set of dimensions must meet: no dimension may be 0, and the
+# total number of elements may not exceed 900 (dimension1*dimension2*dimension3 <= 900
+# for tensors). Also avoid dimensions of 1 or 2, which cause issues later,
+# especially for the 2nd and 3rd dimensions.
+dimensions = [0] * 2
+dimensions_onematrix = [0] * 2
+
+# x goes from 0 to __ (previously 0 to 49)
+for x in range(1):
+    # dimensions_onematrix[0] = random.randint(1,60)
+    # dimensions_onematrix[1] = random.randint(3,60)
+
+    # while((dimensions_onetensor[0]*dimensions_onetensor[1]*dimensions_onetensor[2])>901):
+    #     dimensions_onematrix[0] = random.randint(1,60)
+    #     dimensions_onematrix[1] = random.randint(3,60)
+    #     dimensions_onematrix[2] = random.randint(3,60)
+    dimensions_onematrix[0] = 10
+    dimensions_onematrix[1] = 10
+
+    dimensions[x*3] = dimensions_onematrix[0]
+    dimensions[(x*3)+1] = dimensions_onematrix[1]
+
+    dimensions_onematrix[0] = 0
+    dimensions_onematrix[1] = 0
+    # print('\n')
+
+
+# Generate matrix values based on the dimensions now stored in the
+# two-element dimensions array. i goes from 0 to __ (previously 0 to 49).
+matrix_num = 1
+randomNumber = 0
+numToInsert = 0
+countnnz = 0
+# Add as many sparsity values here as needed (the number of entries in the
+# sparsities array = the number of matrices generated)
+sparsities = [0.5]
+# NEED TO CHANGE suitesparse_path for this to work: frostt_path = os.environ['FROSTT_PATH']
+ss_path = ''
+for i in range(1):
+    filename = os.path.join(ss_path, "rand_matrix"+str(matrix_num)+".mtx")
+    sparsity = sparsities[i]
+    f = open(filename, "w")
+    f.write('\n')
+    lineToAddInFile = ""
+    arr = np.zeros([dimensions[i*3], dimensions[(i*3)+1]], dtype=int)
+    for x in range(len(arr)):
+        for y in range(len(arr[x])):
+            # To control sparsity: draw a uniform random number in [0, 1) and
+            # only insert a value when it exceeds the target sparsity
+            # randomNumber = random.randint(1,9)
+            randomNumber = random.random()
+            if(randomNumber>sparsity):
+                numToInsert = random.randint(1,100)
+                arr[x][y] = numToInsert
+                numToInsert = 0
+            randomNumber = 0
+            # print(arr[x][y])
+            if(arr[x][y]!=0):
+                # tensor files are 1-indexed: to insert a point at (0,0),
+                # write (1,1) into the file
+                lineToAddInFile = "" + str(x+1) + " " + str(y+1) + " " + str(arr[x][y])
+                countnnz += 1
+                f.write(lineToAddInFile + '\n')
+    # write the "rows cols nnz" header as the first line of the file:
+    with open(filename, 'r') as f:
+        content = f.read()
+    updated_content = "" + str(dimensions[i*3]) + " " + str(dimensions[i*3+1]) + " " + str(countnnz) + content
+    with open(filename, 'w') as f:
+        f.write(updated_content)
+
+    with open(filename, 'r') as file:
+        data = file.readlines()
+
+    header = data.pop(0)
+    num_rows, num_cols, num_nonzeros = map(int, header.strip().split())
+    matrix_data = []
+    row_indices = []
+    col_indices = []
+    for line in data:
+        row, col, value = map(float, line.strip().split())
+        row_indices.append(int(row) - 1)  # Convert to 0-based indexing
+        col_indices.append(int(col) - 1)  # Convert to 0-based indexing
+        matrix_data.append(value)
+    matrix = sps.coo_matrix((matrix_data, (row_indices,
col_indices)), shape=(num_rows, num_cols)) + output_file = os.path.join(ss_path, "rand_matrix"+str(matrix_num)+".mat") + sio.savemat(output_file, {'matrix': matrix}, do_compression=True) + + # vec = sps.random(dimensions[i*3+1], 1, 0, data_rvs=np.ones) + vec = np.ones(dimensions[i*3+1]) + output_file1 = os.path.join(ss_path, "rand_vector"+str(matrix_num)+".mat") + sio.savemat(output_file1, {'vector': vec}, do_compression=True) + + + # f.close() + # a = mmread(filename) + # a.toarray() + # scipy.io.savemat("rand_matrix"+str(matrix_num)+".mat", {'mydata': a}) + + # f.write(""+str(dimensions[i*3]) + " " + str(dimensions[i*3+1]) + " " + str(countnnz)) + # f.write("\n") + matrix_num = matrix_num + 1 + + +#first step: one randomly generated 3D tensor given first set dimensions +#Note: generally if 2/3 elems in a tensor is 0, it can be considered sparse +#approach: 2/3 of the time add in a 0, 1/3 of the time add in an integer from 0 to 100 (use randint to generate num from 1 to 9 inclusive, and depending on where the num is, insert number or not) +#print('dimensions:') +#print(dimensions[0]) +#print(dimensions[1]) +#print(dimensions[2]) +#print('tensor vals') + +""" +arr = np.zeros([dimensions[0],dimensions[1],dimensions[2]], dtype=int) +randomNumber = 0 +numToInsert = 0 +for x in range(len(arr)): + for y in range(len(arr[x])): + for z in range(len(arr[x][y])): + #generate random number from 1 to 9; if 1,2,3,7,8,9 don't add a num in, only add if 4,5,6 + randomNumber = random.randint(1,9) + if(randomNumber==4 or randomNumber==5 or randomNumber==6): + numToInsert = random.randint(1,100) + arr[x][y][z] = numToInsert + numToInsert = 0 + print(arr[x][y][z]) + + #lineToAddInFile="" + str(x) + " " + str(y) + " " + str(z) + " " + str(arr[x][y][z]) + #f.write(lineToAddInFile + '\n') + +print('dimensions:') +print(dimensions[3]) +print(dimensions[4]) +print(dimensions[5]) +print('tensor vals') +arr = np.zeros([dimensions[3],dimensions[4],dimensions[5]], dtype=int) +randomNumber = 0 +numToInsert = 0 +for x in range(len(arr)): + for y in range(len(arr[x])): + for z in range(len(arr[x][y])): + #generate random number from 1 to 9; if 1,2,3,7,8,9 don't add a num in, only add if 4,5,6 + randomNumber = random.randint(1,9) + if(randomNumber==4 or randomNumber==5 or randomNumber==6): + numToInsert = random.randint(1,100) + arr[x][y][z] = numToInsert + numToInsert = 0 + randomNumber = 0 + print(arr[x][y][z]) + + #lineToAddInFile="" + str(x) + " " + str(y) + " " + str(z) + " " + str(arr[x][y][z]) + #f.write(lineToAddInFile + '\n') +""" diff --git a/maximum_tiling.py b/maximum_tiling.py new file mode 100644 index 00000000..2390ed21 --- /dev/null +++ b/maximum_tiling.py @@ -0,0 +1,270 @@ +import glob +import sys +import numpy as np +import scipy +import os +import re + +class EarlyReturn(): + pass + +def get_files_from_dir(path, operands): + operand_files = {} + for operand in operands: + operand_files[operand] = glob.glob(os.path.join(path, f"*{operand}*.mtx")) + + return operand_files +def get_tile_id(string): + indices = [m.start() for m in re.finditer("tile", string)] + if len(indices) >= 2: + substring = string[indices[1] + len("tile") + 1:] + substring = substring.rstrip(".mtx") + numbers = substring.split("_") + return numbers + +def pair_tiles(app_name): + path = f"tiles/{app_name}/mtx" + tile_pairing = {} + + operands = [] + if "matmul" in app_name: + operands = ["B", "C"] + operand_files = get_files_from_dir(path, operands) + b_tensors = operand_files["B"] + c_tensors = operand_files["C"] + + tile = 0 + for b 
in b_tensors: + for c in c_tensors: + b_loc = get_tile_id(b) + c_loc = get_tile_id(c) + if (b_loc[1] == c_loc[0] and b_loc[3] == c_loc[2]): + tile_pairing[tile] = [b, c] + tile += 1 + elif "elemmul" in app_name: + operands = ["B", "C"] + operand_files = get_files_from_dir(path, operands) + b_tensors = operand_files["B"] + c_tensors = operand_files["C"] + + tile = 0 + for b in b_tensors: + for c in c_tensors: + b_loc = get_tile_id(b) + c_loc = get_tile_id(c) + if (b_loc == c_loc): + tile_pairing[tile] = [b, c] + tile += 1 + elif "elemadd3" in app_name: + operands = ["B", "C", "D"] + operand_files = get_files_from_dir(path, operands) + b_tensors = operand_files["B"] + c_tensors = operand_files["C"] + d_tensors = operand_files["D"] + + tile = 0 + for b in b_tensors: + for c in c_tensors: + b_loc = get_tile_id(b) + c_loc = get_tile_id(c) + if (b_loc != c_loc): + continue + + for d in d_tensors: + d_loc = get_tile_id(d) + if (b_loc == c_loc and c_loc == d_loc): + tile_pairing[tile] = [b, c, d] + tile += 1 + + elif "mat_mask_tri" in app_name: + operands = ["B", "C", "D"] + operand_files = get_files_from_dir(path, operands) + b_tensors = operand_files["B"] + c_tensors = operand_files["C"] + d_tensors = operand_files["D"] + + tile = 0 + for b in b_tensors: + for c in c_tensors: + b_loc = get_tile_id(b) + c_loc = get_tile_id(c) + if not (b_loc[0] == c_loc[0] and b_loc[2] == c_loc[2]): + continue + + for d in d_tensors: + d_loc = get_tile_id(d) + if(c_loc[1] == d_loc[0] and c_loc[3] == d_loc[2] and b_loc[1] == d_loc[1] and b_loc[3] == d_loc[3] and b_loc[0] == c_loc[0] and b_loc[2] == c_loc[2]): + tile_pairing[tile] = [b, c, d] + tile += 1 + elif "mat_vecmul_iter" in app_name: + operands = ["B", "C", "D", "E", "f"] + operand_files = get_files_from_dir(path, operands) + b_tensors = operand_files["B"] + c_tensors = operand_files["C"] + d_tensors = operand_files["D"] + e_tensors = operand_files["E"] + f_tensors = operand_files["f"] + + tile = 0 + + for b in b_tensors: + for c in c_tensors: + b_loc = get_tile_id(b) + c_loc = get_tile_id(c) + if not (b_loc[1] == c_loc[0] and b_loc[3] == c_loc[2]): + continue + for d in d_tensors: + d_loc = get_tile_id(d) + # check k coord + if not (c_loc[1] == d_loc[0] and c_loc[3] == d_loc[2]): + continue + for e in e_tensors: + e_loc = get_tile_id(e) + # check l coord + if not (d_loc[1] == e_loc[0] and d_loc[3] == e_loc[2]): + continue + for f in f_tensors: + f_loc = get_tile_id(f) + if (d_loc[1] == e_loc[0] and d_loc[3] == e_loc[2] and c_loc[1] == d_loc[0] and c_loc[3] == d_loc[2] and b_loc[1] == c_loc[0] and b_loc[3] == c_loc[2] and e_loc[1] == f_loc[0] and e_loc[3] == f_loc[1]): + tile_pairing[tile] = [b, c, d, e, f] + tile += 1 + + + + + return tile_pairing + +def read_mtx(mtx_path): + matrix = scipy.io.mmread(mtx_path) + arr = np.array(matrix.todense()) + return arr + +def compute_outputs(tile_pairing, app_name, limit=900): + for key, value in tile_pairing.items(): + if "matmul" in app_name: + B_mat = read_mtx(value[0]) + C_mat = read_mtx(value[1]) + C_mat = np.transpose(C_mat) + out = np.matmul(B_mat, C_mat) + if np.count_nonzero(out) > limit or np.count_nonzero(B_mat) > limit or np.count_nonzero(C_mat) > limit: + # if np.any(out): + print("tile = ", key) + print("B_tile_ID = ", value[0]) + print("C_tile_ID = ", value[1]) + print("out = ", out) + print("count = ", np.count_nonzero(out)) + return EarlyReturn() + elif "elemmul" in app_name: + B_mat = read_mtx(value[0]) + C_mat = read_mtx(value[1]) + out = np.multiply(B_mat, C_mat) + # if np.any(out): + if 
np.count_nonzero(out) > limit or np.count_nonzero(B_mat) > limit or np.count_nonzero(C_mat) > limit:
+            # if np.count_nonzero(out) > limit or (np.count_nonzero(B_mat) + np.count_nonzero(C_mat)) > limit:
+                print("tile = ", key)
+                print("B_tile_ID = ", value[0])
+                print("C_tile_ID = ", value[1])
+                print("out = ", out)
+                print("count = ", np.count_nonzero(out))
+                return EarlyReturn()
+        elif "elemadd3" in app_name:
+            B_mat = read_mtx(value[0])
+            C_mat = read_mtx(value[1])
+            D_mat = read_mtx(value[2])
+
+            out = np.add(np.add(B_mat, C_mat), D_mat)
+            # if np.any(out):
+            if np.count_nonzero(out) > limit or np.count_nonzero(B_mat) > limit or np.count_nonzero(C_mat) > limit or np.count_nonzero(D_mat) > limit:
+            # if np.count_nonzero(out) > limit or (np.count_nonzero(B_mat) + np.count_nonzero(C_mat)) > limit:
+                print("tile = ", key)
+                print("B_tile_ID = ", value[0])
+                print("C_tile_ID = ", value[1])
+                print("D_tile_ID = ", value[2])
+                print("out = ", out)
+                print("count = ", np.count_nonzero(out))
+                return EarlyReturn()
+        elif "mat_mask_tri" in app_name:
+            B_mat = read_mtx(value[0])
+            C_mat = read_mtx(value[1])
+            D_mat = read_mtx(value[2])
+            D_mat = np.transpose(D_mat)
+            out = np.sum(np.multiply(np.matmul(C_mat, D_mat), B_mat))
+            if np.count_nonzero(out) > limit or np.count_nonzero(B_mat) > limit or np.count_nonzero(C_mat) > limit or np.count_nonzero(D_mat) > limit:
+                print("tile = ", key)
+                print("B_tile_ID = ", value[0])
+                print("C_tile_ID = ", value[1])
+                print("D_tile_ID = ", value[2])
+                print("out = ", out)
+                print("count = ", np.count_nonzero(out))
+                return EarlyReturn()
+        elif "mat_vecmul_iter" in app_name:
+            B_mat = read_mtx(value[0])
+            C_mat = read_mtx(value[1])
+            D_mat = read_mtx(value[2])
+            E_mat = read_mtx(value[3])
+            f_mat = read_mtx(value[4])
+            # we transpose because we swap operands in copy formatting
+            f_mat = np.transpose(f_mat)
+            out = np.matmul(np.matmul(np.matmul(np.matmul(B_mat, C_mat), D_mat), E_mat), f_mat)
+            if np.any(out):
+            # if np.count_nonzero(out) > limit or np.count_nonzero(B_mat) > limit or np.count_nonzero(C_mat) > limit or np.count_nonzero(D_mat) > limit or np.count_nonzero(E_mat) > limit or np.count_nonzero(f_mat) > limit:
+                print("tile = ", key)
+                print("B_tile_ID = ", value[0])
+                print("C_tile_ID = ", value[1])
+                print("D_tile_ID = ", value[2])
+                print("E_tile_ID = ", value[3])
+                print("f_tile_ID = ", value[4])
+                print("out = ", out)
+                print("count = ", np.count_nonzero(out))
+                return EarlyReturn()
+    return None
+
+
+def find_optimal_tilesize(app_name, datum, initial=30, step_size=10):
+    tile_size = initial
+    max_tile_size = initial
+    prev_tile_pairing = None
+
+    # while True:
+    for _ in range(50):
+        call_tiling = f"python3 setup_tiling_mat.py {app_name} {datum} {tile_size} > temp.txt"
+        os.system(call_tiling)
+        print(call_tiling)
+
+        tile_pairing = pair_tiles(app_name)
+        exit_status = compute_outputs(tile_pairing, app_name)
+        if isinstance(exit_status, EarlyReturn):
+            max_tile_size = tile_size - step_size
+            return max_tile_size, prev_tile_pairing
+
+        tile_size += step_size
+        print("***********************")
+        print("tile size = ", tile_size)
+        print("***********************")
+        prev_tile_pairing = tile_pairing
+
+    return tile_size, prev_tile_pairing
+
+
+if __name__ == "__main__":
+    max_list = {}
+    # for i in range(1, 11):
+    app_name = "matmul_ijk"
+    datum = "N_biocarta"
+
+    # tile_pairing = pair_tiles(app_name)
+    # compute_outputs(tile_pairing, app_name)
+
+    max_tile_size, tile_pairing = find_optimal_tilesize(app_name, datum, initial=40, step_size=10)
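+    # find_optimal_tilesize() grows the tile size by step_size until some tile
+    # overflows the nonzero budget, then returns the last size whose tiles all
+    # passed the check, together with the tile pairing computed at that size.
+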
print("-"*20) + print(f"MAX TILESIZE for {app_name}, {datum}: {max_tile_size}") + print(f"NUMBER OF TILES: {len(tile_pairing.keys())}") + print("-"*20) + + max_list[datum] = [max_tile_size, len(tile_pairing.keys())] + + call_tiling = f"python3 setup_tiling_mat.py {app_name} {datum} {max_tile_size} > temp.txt" + os.system(call_tiling) + print(call_tiling) + + # print(max_list) \ No newline at end of file diff --git a/sam/onyx/synthetic/generate_fixed_nnz_mats.py b/sam/onyx/synthetic/generate_fixed_nnz_mats.py index ec099dfb..6671fcc2 100644 --- a/sam/onyx/synthetic/generate_fixed_nnz_mats.py +++ b/sam/onyx/synthetic/generate_fixed_nnz_mats.py @@ -1,7 +1,7 @@ import scipy.io import scipy.sparse import numpy as np - +import argparse def generate_mat(nnz, dim): return scipy.sparse.random(dim, dim, nnz / (dim**2), data_rvs=np.ones) @@ -14,9 +14,21 @@ def write_mtx(path, t): if __name__ == "__main__": seed = 0 np.random.seed(seed) - # 1024 - dims = list(range(1024, 15721, 1336)) - nnzs = [5000, 10000, 25000, 50000] + + parser = argparse.ArgumentParser(description="Create some random matrices of given nnz and dim") + parser.add_argument('--nnz', type=int, nargs='+', help='nnz') + parser.add_argument('--dim', type=int, nargs='+', help='dim') + parser.add_argument('--extensor', action='store_true', help='generate extensor dims and nnzs') + args = parser.parse_args() + + + if args.extensor: + dims = list(range(1024, 15721, 1336)) + nnzs = [5000, 10000, 25000, 50000] + else: + dims = args.dim + nnzs = args.nnz + print("RUNNING:", dims, nnzs) for nnz in nnzs: for dim in dims: diff --git a/sam/sim/src/accumulator.py b/sam/sim/src/accumulator.py index a3500369..43cb8e5e 100644 --- a/sam/sim/src/accumulator.py +++ b/sam/sim/src/accumulator.py @@ -40,11 +40,6 @@ def set_backpressure(self, backpressure): if not backpressure: self.ready_backpressure = False - def fifo_available(self, br=""): - if self.backpressure_en: - return self.fifo_avail - return True - def update_ready(self): if self.backpressure_en: if len(self.in_val) > self.depth: @@ -52,11 +47,6 @@ def update_ready(self): else: self.fifo_avail = True - def add_child(self, child=None, branch=""): - if self.backpressure_en and child is not None: - self.backpressure.append(child) - self.branch.append(branch) - def update(self): self.update_ready() self.update_done() @@ -202,7 +192,7 @@ def update(self): if self.debug: if self.seen_done or self.done: print(self.seen_done, self.done) - print("@@@", self.outer_crdpt, self.inner_crdpt, self.in_val, self.emit_output, + print("current point value", self.outer_crdpt, self.inner_crdpt, self.in_val, self.emit_output, self.curr_in_outer_crdpt, self.curr_in_inner_crdpt, self.curr_val) self.print_debug() if len(self.in_val) > 0 and self.in_val[0] == "D": @@ -239,12 +229,6 @@ def update(self): self.curr_in_inner_crdpt = self.inner_crdpt.pop(0) ocrd = self.outer_crdpt.pop(0) - # if self.curr_in_val == 'D': - # print(self.curr_in_val, self.curr_in_inner_crdpt, ocrd) - # assert self.curr_in_val == "D" and self.curr_in_inner_crdpt == "D" and ocrd == "D" - # print("######", ocrd, self.curr_in_outer_crdpt, self.curr_in_inner_crdpt, self.emit_output) - # print(self.in_val, self.outer_crdpt, self.inner_crdpt, ocrd - # self.curr_in_outer_crdpt, self.curr_in_inner_crdpt, self.curr_in_val) emit_output = ocrd != self.curr_in_outer_crdpt and self.curr_in_outer_crdpt is not None and \ self.curr_in_outer_crdpt != "D" if emit_output: @@ -271,17 +255,11 @@ def update(self): self.seen_done = True else: 
self.storage[self.curr_in_outer_crdpt] = {self.curr_in_inner_crdpt: self.valtype(self.curr_in_val)} - # if self.curr_in_outer_crdpt == "D": - # print("__________", self.emit_output, self.seen_done) if len(self.emit_output) > 0: fiber = self.emit_output[0] self.curr_outer_crdpt = fiber[0] - # print("===, ", self.storage) - # print(fiber) - # print(self.emit_output) - # print(self.storage[self.curr_outer_crdpt].keys(), fiber[1]) self.curr_inner_crdpt = min( [item for item in self.storage[self.curr_outer_crdpt].keys() if item > fiber[1]]) self.curr_val = self.storage[self.curr_outer_crdpt][self.curr_inner_crdpt] @@ -433,26 +411,6 @@ def set_backpressure(self, backpressure): if not backpressure: self.ready_backpressure = False - # FIXME: (owhsu) This code is unreachable - def fifo_available(self, br=""): - assert False - if self.backpressure_en: - if br == "inner": - # and len(self.in_inner_crdpt) > self.depth: - return self.fifo_avail_inner - if br == "outer": # and len(self.in_outer_crdpt) > self.depth: - return self.fifo_avail_outer # return False - if br == "val": # and len(self.in_val) > self.depth: - return self.fifo_avail_val # return False - # return True - return True - - def add_child(self, child=None, branch=""): - if self.backpressure_en: - if child is not None: - self.backpressure.append(child) - self.branch.append(branch) - def update_ready(self): if self.backpressure_en: if len(self.in_inner_crdpt) > self.depth: @@ -480,16 +438,13 @@ def update(self): print(self.in_outer_crdpt, self.in_inner_crdpt, self.in_val) print(self.crdpt_spacc.print_debug()) print(self.crdpt_converter.print_debug()) - if self.done: + if self.done and self.memory_model_en: f1, f2, f3 = self.crdpt_spacc.return_fifo() f4, f5 = self.crdpt_converter.return_fifo() self.crdpt_spacc = SparseCrdPtAccumulator1(maxdim=self.temp_maxdim, valtype=self.temp_valtype, fifos=[f1, f2, f3]) self.crdpt_converter = CrdPtConverter(last_level=self.temp_last_level, fifos=[f4, f5]) - # FIXME: (owhsu) self.data_ready not defined in init - if self.backpressure_en: - self.data_ready = True if len(self.in_outer_crdpt) > 0 or len(self.in_inner_crdpt) > 0: self.block_start = False @@ -509,7 +464,7 @@ def update(self): self.crdpt_spacc.set_val(self.in_val.pop(0)) self.crdpt_spacc.update() - print(">>>>>>>>>>>>SPACC:", self.crdpt_spacc.out_outer_crdpt(), self.crdpt_spacc.out_inner_crdpt()) + # print(">>>>>>>>>>>>SPACC:", self.crdpt_spacc.out_outer_crdpt(), self.crdpt_spacc.out_inner_crdpt()) self.crdpt_converter.set_outer_crdpt(self.crdpt_spacc.out_outer_crdpt()) self.crdpt_converter.set_inner_crdpt(self.crdpt_spacc.out_inner_crdpt()) @@ -854,12 +809,6 @@ def set_backpressure(self, backpressure): if not backpressure: self.ready_backpressure = False - def add_child(self, child=None, branch=""): - if self.backpressure_en: - if child is not None: - self.backpressure.append(child) - self.branch.append(branch) - def update_ready(self): if self.backpressure_en: if len(self.in0_crdpt) > self.depth: @@ -881,7 +830,7 @@ def update(self): if self.backpressure_en: self.data_valid = False if (self.backpressure_en and self.check_backpressure()) or not self.backpressure_en: - if self.done: + if self.done and self.memory_model_en: f1, f2, f3 = self.crdpt_spacc.return_fifo() f4, f5 = self.crdpt_converter.return_fifo() self.crdpt_spacc = SparseCrdPtAccumulator2(maxdim=self.temp_maxdim, valtype=self.temp_valtype, diff --git a/sam/sim/src/array.py b/sam/sim/src/array.py index c05a017f..669baef9 100644 --- a/sam/sim/src/array.py +++ b/sam/sim/src/array.py @@ 
-44,15 +44,10 @@ def set_fifo(self, fifo): def get_fifo(self): return self.load_addrs - def add_child(self, child, branch=""): - if self.backpressure_en: - if child is not None: - self.backpressure.append(child) - self.branch.append(branch) - def set_path(self, path): self.path = path + # FIXME(ritvik): fix the initialization of array def reintilialize_arrs(self, load_vals, fifo): self.arr = load_vals self.set_fifo(fifo) @@ -111,12 +106,6 @@ def update(self): self.store(store_tup[0], store_tup[1]) self.store_en = False - def fifo_available(self, br=""): - if self.backpressure_en: - if len(self.load_addrs) > 1: - return False - return True - def update_ready(self): if self.backpressure_en: if len(self.load_addrs) > self.depth: diff --git a/sam/sim/src/base.py b/sam/sim/src/base.py index 2ba86530..b4556033 100644 --- a/sam/sim/src/base.py +++ b/sam/sim/src/base.py @@ -86,7 +86,7 @@ def larger_stkn(a, b): class Primitive(ABC): - def __init__(self, debug=False, statistics=False, name="", back_en=False, **kwargs): + def __init__(self, debug=False, statistics=False, name="", back_en=False, memory_model_en=False, **kwargs): self.name = name self.done = False self.debug = debug @@ -97,6 +97,7 @@ def __init__(self, debug=False, statistics=False, name="", back_en=False, **kwar self.get_stats = statistics self.backpressure_en = back_en + self.memory_model_en = memory_model_en def out_done(self): return self.done diff --git a/sam/sim/src/channel.py b/sam/sim/src/channel.py index 92690dcc..4582531e 100644 --- a/sam/sim/src/channel.py +++ b/sam/sim/src/channel.py @@ -219,6 +219,7 @@ def input_token_(self, token): self.downstream_token = token +# FIXME: Follow code style and fix class naming convention and make sure it's base is primitive... class memory_block(): def __init__(self, name="B", skip_blocks=False, element_size=2, level=None, indexes=2, size=1000 * 2, nbuffer=False, latency=10, debug=False, bandwidth=2, diff --git a/sam/sim/src/joiner.py b/sam/sim/src/joiner.py index 51089e73..488c7bee 100644 --- a/sam/sim/src/joiner.py +++ b/sam/sim/src/joiner.py @@ -57,11 +57,6 @@ def update_ready(self): else: self.fifo_avail_in2 = True - def add_child(self, child=None, branch=""): - if self.backpressure_en: - self.backpressure.append(child) - self.branches.append(branch) - def set_in1(self, in_ref1, in_crd1, parent=None): if in_ref1 != '' and in_crd1 != '' and in_ref1 is not None and in_crd1 is not None: # print(in_ref1, " ", in_crd1) diff --git a/sam/sim/src/rd_scanner.py b/sam/sim/src/rd_scanner.py index feece99e..08b3690a 100644 --- a/sam/sim/src/rd_scanner.py +++ b/sam/sim/src/rd_scanner.py @@ -44,18 +44,6 @@ def out_crd(self, child=None): if (self.backpressure_en and self.data_valid) or not self.backpressure_en: return self.curr_crd - def add_child(self, child, branch=""): - if self.backpressure_en and child is not None: - self.backpressure.append(child) - self.branches.append(branch) - - def fifo_available(self, br=""): - if self.backpressure_en: - return self.fifo_avail - # and len(self.in_ref) > self.depth: - # return False - return True - def update_ready(self): if self.backpressure_en: if len(self.in_ref) > self.depth: @@ -92,11 +80,11 @@ def __init__(self, dim=0, depth=4, **kwargs): def update(self): self.update_done() self.update_ready() - if len(self.in_ref) > 0: - self.block_start = False if self.backpressure_en: self.data_valid = False if (self.backpressure_en and self.check_backpressure()) or not self.backpressure_en: + if len(self.in_ref) > 0: + self.block_start = False if 
self.backpressure_en: self.data_valid = True if self.emit_tkn and len(self.in_ref) > 0: @@ -246,6 +234,7 @@ def __init__(self, crd_arr=[], seg_arr=[], skip=True, depth=1, tile_size=None, f if fifo is not None: self.set_fifo(fifo) + # FIXME (Ritvik): Use reinitialize array isntead of redeclaring the rd scanner def reinitialize_arrs(self, seg_arr, crd_arr, fifo): # assert False self.start_addr = 0 @@ -281,11 +270,6 @@ def set_fifo(self, fifo): def get_fifo(self): return self.in_ref - def fifo_available(self, br=""): - if self.backpressure_en and len(self.in_ref) > self.depth: - return False - return True - def set_in_ref(self, in_ref, parent=None): if in_ref != '' and in_ref is not None: self.in_ref.append(in_ref) @@ -366,6 +350,9 @@ def update(self): if (self.backpressure_en and self.check_backpressure()) or not self.backpressure_en: if self.backpressure_en: self.data_valid = True + if len(self.in_ref) > 0: + self.block_start = False + # Process skip token first and save if len(self.in_crd_skip) > 0 and self.skip_processed: self.curr_skip = self.in_crd_skip.pop(0) diff --git a/sam/sim/src/repeater.py b/sam/sim/src/repeater.py index 66ccfebe..956506ad 100644 --- a/sam/sim/src/repeater.py +++ b/sam/sim/src/repeater.py @@ -52,11 +52,6 @@ def update_ready(self): else: self.fifo_avail_repeat = True - def add_child(self, child=None, branch=""): - if self.backpressure_en: - self.backpressure.append(child) - self.branches.append(branch) - def update(self): self.update_done() self.update_ready() @@ -359,11 +354,6 @@ def update_ready(self): else: self.fifo_avail = True - def add_child(self, child=None, branch=""): - if self.backpressure_en: - self.backpressure.append(child) - self.branches.append(branch) - # input can either be coordinates or references def set_istream(self, istream, parent=None): if istream != '' and istream is not None: diff --git a/sam/sim/src/tiling/memory_config_onyx.yaml b/sam/sim/src/tiling/memory_config_onyx.yaml index 66779d32..a6fa9d35 100644 --- a/sam/sim/src/tiling/memory_config_onyx.yaml +++ b/sam/sim/src/tiling/memory_config_onyx.yaml @@ -15,5 +15,5 @@ Bytes_per_element: 2 # Number n_levels: 3 level_names: ["Main", "Glb", "Mem"] Main_tile_size: None -Glb_tile_size: 16 # 16 # 120 # n = (nxn) elements -Mem_tile_size: 16 # Size of one dense dimension. 
8 = (8x8) +Glb_tile_size: 8 # 8 = (8x8) = 64 elements +Mem_tile_size: 30 diff --git a/sam/sim/src/tiling/tile.py b/sam/sim/src/tiling/tile.py index edaf29d0..47cf15a0 100644 --- a/sam/sim/src/tiling/tile.py +++ b/sam/sim/src/tiling/tile.py @@ -1,21 +1,38 @@ import numpy as np import scipy.sparse +import scipy.io import os import argparse import ast import yaml import copy import pickle +import random +import sparse +import sys -from itertools import compress from pathlib import Path -from sam.util import SuiteSparseTensor, InputCacheSuiteSparse, ScipyTensorShifter -from sam.sim.src.tiling.process_expr import parse_all, update_dict -SAM_STRS = {"matmul_ikj": "X(i,j)=B(i,k)*C(k,j) -f=X:ss -f=B:ss -f=C:ss -s=reorder(i,k,j)", - "matmul_ijk": "X(i,j)=B(i,k)*C(k,j) -f=X:ss -f=B:ss -f=C:ss:1,0 -s=reorder(i,j,k)", - "mat_elemmul": "X(i,j)=B(i,j)*C(i,j) -f=X:ss -f=B:ss -f=C:ss -s=reorder(i,j)", - "mat_elemadd": "X(i,j)=B(i,j)+C(i,j) -f=X:ss -f=B:ss -f=C:ss -s=reorder(i,j)"} +from sam.util import SUITESPARSE_PATH, SuiteSparseTensor, InputCacheSuiteSparse, ScipyTensorShifter, \ + FROSTT_PATH, FrosttTensor, PydataSparseTensorDumper, InputCacheTensor, constructOtherMatKey, constructOtherVecKey +from sam.sim.src.tiling.process_expr import parse_all + +# FIXME: This should not be here... Set your SAM_HOME directory +custom_path = '/home/avb03/sam' +sys.path.append(custom_path) + +SAM_STRS = {"matmul_kij": "X(i,j)=B(i,k)*C(k,j) -f=X:ss -f=B:ss:1,0 -f=C:ss -s=reorder(k,i,j)", + "matmul_ikj": "X(i,j)=B(i,k)*C(k,j) -f=X:ss -f=B:ss -f=C:ss -s=reorder(i,k,j)", + "matmul_ijk": "X(i,j)=B(i,k)*C(k,j) -f=X:ss -f=B:ss -f=C:ss:1,0 -s=reorder(i,j,k)", + "mat_elemadd": "X(i,j)=B(i,j)+C(i,j) -f=X:ss -f=B:ss -f=C:ss:1,0 -s=reorder(i,j,k)", + "mat_elemmul": "X(i,j)=B(i,j)*C(i,j) -f=X:ss -f=B:ss -f=C:ss:1,0 -s=reorder(i,j,k)", + "mat_mattransmul": "X(i,j)=B(j,i)*c(j)+d(i) -f=X:ss -f=B:ss -f=c:ss:0 -f=d:ss:0 -s=reorder(i,j)", + "mat_vecmul_ij" : "X(i,j)=B(i,j)*c(j) -f=X:ss -f=B:ss -f=c:ss:0 -s=reorder(i,j)", + "mat_residual": "X(i,j)=b(i)-C(i,j)*d(j) -f=X:ss -f=C:ss -f=b:ss:0 -f=d:ss:0 -s=reorder(i,j)", + "mat_sddmm": "X(i,j)=B(i,j)*C(i,k)*D(k,j) -f=X:ss -f=B:ss -f=C:dd -f=D:dd:1,0 -s=reorder(i,j,k)", + "mat_elemadd3": "X(i,j)=B(i,j)+C(i,j)+D(i,j) -f=X:ss -f=B:ss -f=C:ss -f=D:ss", + "mat_mask_tri": "X(i,j)=B(i,j)*C(i,k)*D(k,j) -f=X:ss -f=B:ss -f=C:ss -f=D:ss:1,0 -s=reorder(i,j,k)", + "mat_vecmul_iter": "X(i,j)=B(i,j)*C(j,k)*D(k,l)*E(l,m)*f(m) -f=X:ss -f=B:ss -f=C:ss -f=D:ss -f=E:ss -f=f:s -s=reorder(i,j,k,l,m)"} def print_dict(dd): @@ -23,6 +40,12 @@ def print_dict(dd): print(k, ":", v) +def print_ast(node): + for child in ast.iter_child_nodes(node): + print_ast(child) + print(node) + + def get_ivars(names, expr): [lhs, rhs] = expr.split("=") @@ -68,27 +91,102 @@ def parse_sam_input(string): str_arr = sam_str.split(" ") dictionary = parse_all(str_arr, has_quotes=False) + print("dictionary is: ", dictionary) # Assume there are no repeat tensors... 
tensors = dictionary["rhs_tensors"] + print("tensors are: ", tensors) permutations = [list(map(int, dictionary[tensor]["perm"])) for tensor in tensors] ivars = get_ivars(tensors, str_arr[0]) ivars = [ivars[tensor] for tensor in tensors] + + print("PARSE SAM INPUTS", tensors) return tensors, permutations, ivars +# Outputs Pydata/sparse tensor tiles, given a pydata/sparse tensor (DOK or COO) +# ASSUME: tensor is a scipy.sparse.coo_matrix +# TODO: new_ivar_order right now is assumed to be one fixed order +# In the future, will have to take into acocunt all reorderings +def tile_tensor(tensor, ivar_map, split_map, new_ivar_order=None, tensor_name=""): + human_readable = False + + tiles = dict() + tile_sizes = dict() + order = len(tensor.shape) + + tensor_coo = sparse.COO(tensor) + tensor_points = sparse.DOK.from_coo(tensor_coo) + + print("ivar_map: ", ivar_map) + print("split_map: ", split_map) + print("order = ", order) + + new_shape = [] + for lvl in range(order): + ivar = ivar_map[lvl] + sf = split_map[ivar] + new_shape.append(sf) + + for crds, val in tensor_points.data.items(): + point = list(crds) + + new_point = [] + tile_id = [] + for lvl in range(order): + ivar = ivar_map[lvl] + sf = split_map[ivar] + + new_point.append(point[lvl] % sf) + tile_id.append(int(point[lvl] / sf)) + + # Add in value to the new_point as well + new_point.append(val) + tile_id = tuple(tile_id) + + if tile_id in tiles: + tiles[tile_id].append(new_point) + else: + tiles[tile_id] = [new_point] + + # sort the new coo lists + for key, val in tiles.items(): + if human_readable: + dok = sorted(val) + else: + dok = sparse.DOK(tuple(new_shape)) + for point in val: + dok[tuple(point[0:-1])] = point[-1] + + tiles[key] = dok + + for tile_id, tile_dok in tiles.items(): + tile = tile_dok.to_coo() + # FIXME: This size number isn't correct for tensor tiles + nonempty_rows = tile.nnz + nonempty_row_ind = np.where(nonempty_rows > 0)[0] + tile_sizes[tile_id] = tile.nnz * 2 + 2 * len(nonempty_row_ind) + 3 + + return tiles, tile_sizes + + # Outputs COO tiles, given a COO tensor # ASSUME: tensor is a scipy.sparse.coo_matrix # TODO: new_ivar_order right now is assumed to be one fixed order # In the future, will have to take into acocunt all reorderings -def tile_coo(tensor, ivar_map, split_map, new_ivar_order=None): +def tile_coo(tensor, ivar_map, split_map, new_ivar_order=None, tensor_name=""): human_readable = False tiles = dict() tile_sizes = dict() order = len(tensor.shape) - tensor_points = tensor.todok() + tensor_coo = scipy.sparse.coo_matrix(tensor) + tensor_points = tensor_coo.todok() + + print("ivar_map: ", ivar_map) + print("split_map: ", split_map) + print("order = ", order) new_shape = [] for lvl in range(order): @@ -138,34 +236,45 @@ def tile_coo(tensor, ivar_map, split_map, new_ivar_order=None): # tensor_names: list of tensor names [B,C,D] (from SAM) -# tensors: list of scipy.sparse.coo_matrix following tensor_names (from SAM) +# tensors: list of sparse COO tensors (either Scipy or Pydata/Sparse) following tensor_names (from SAM) # permutation_strs: list of permutation_strs [ss01, ss10] following tensor_names (from SAM) # ivar_strs: list of ivar_strs ["ik", "kj"] following tensor_names (from SAM) # split_map: dictionary of split factors (from hardware) -def cotile_coo(tensor_names, tensors, permutation_strs, ivar_strs, split_map): +def cotile_coo(tensor_names, tensors, permutation_strs, ivar_strs, split_map, higher_order=False): tiled_tensors = dict() tiled_tensor_sizes = dict() + print(tensor_names, tensors, 
permutation_strs, ivar_strs, split_map)

     for i, tensor in enumerate(tensors):
         tensor_name = tensor_names[i]
         tensor_format = permutation_strs[i]

         ivar_map = dict()
         order = len(tensor.shape)
+        print("order is ", order)
         for dim in range(order):
+            print("tensor format: ", tensor_format)
+
+            print("dim is ", dim)
+            print("tensor_format[dim:dim+1] is ", tensor_format[dim:dim + 1])
+            print("tensor name is ", tensor_name)
             lvl_permutation = tensor_format[dim:dim + 1][0]
             ivar = ivar_strs[i][dim]
             ivar_map[lvl_permutation] = ivar
+        print("ivar_map is ", ivar_map)
+
+        if higher_order:
+            tiles, tile_sizes = tile_tensor(tensor, ivar_map, split_map, tensor_name=tensor_name)
+        else:
+            tiles, tile_sizes = tile_coo(tensor, ivar_map, split_map, tensor_name=tensor_name)
-        tiles, tile_sizes = tile_coo(tensor, ivar_map, split_map)
         tiled_tensors[tensor_name] = tiles
         tiled_tensor_sizes[tensor_name] = tile_sizes

     return tiled_tensors, tiled_tensor_sizes


-def get_other_tensors(app_str, tensor):
-    tensors = []
-    tensors.append(tensor)
+def get_other_tensors(app_str, tensor, other_nonempty=True):
+    tensors = [tensor]

     if "matmul" in app_str:
         print("Writing shifted...")
@@ -187,24 +296,116 @@
         tensors.append(shifted)
     elif "mat_sddmm" in app_str:
-        pass
-    elif "mat_mattransmul" in app_str or "mat_residual" in app_str:
-        pass
+        print("Writing other tensors, shifted...")
+        print("Writing shifted...")
+        shifted = ScipyTensorShifter().shiftLastMode(tensor)
+        tensors.append(shifted)
+
+        print("Writing shifted2...")
+        shifted2 = ScipyTensorShifter().shiftLastMode(shifted)
+        tensors.append(shifted2)
+
+    elif "mat_mask_tri" in app_str:
+        print("Writing other tensor 1...")
+        shifted = ScipyTensorShifter().shiftLastMode(tensor)
+        tensors.append(shifted)
+
+        print("Writing shifted2...")
+        shifted2 = ScipyTensorShifter().shiftLastMode(shifted)
+        tensors.append(shifted2)
+    elif "mat_vecmul_iter" in app_str:
+        print("Writing other tensor 1...")
+        tensors.append(tensor)
+        tensors.append(tensor)
+        tensors.append(tensor)
+
+        print("writing other vector...")
+        tensorName = args.input_tensor
+        variant = "mode1"
+        path = constructOtherVecKey(tensorName, variant)
+        tensor_c_from_path = FrosttTensor(path)
+        tensor_c = tensor_c_from_path.load().todense()
+
+        # breakpoint()
+        tensors.append(tensor_c)
+
+    elif "mat_mattransmul" in app_str:
+        print("Writing other tensors...")
+        rows, cols = tensor.shape  # i,j
+        tensor_c = scipy.sparse.random(cols, 1, data_rvs=np.ones).toarray().flatten()
+        # tensor_d = scipy.sparse.random(rows, 1, density=1.0, data_rvs=np.ones).toarray().flatten()
+        tensor_d = scipy.sparse.random(rows, 1, data_rvs=np.ones).toarray().flatten()
+
+        if other_nonempty:
+            tensor_c[0] = 1
+            tensor_d[0] = 1
+
+        # import pdb; pdb.set_trace()
+
+        tensors.append(tensor_c)
+        tensors.append(tensor_d)
+
+    elif "mat_residual" in app_str:
+        print("Writing other tensors...")
+        rows, cols = tensor.shape
+        tensor_b = scipy.sparse.random(rows, 1, data_rvs=np.ones).toarray().flatten()
+        tensor_d = scipy.sparse.random(cols, 1, data_rvs=np.ones).toarray().flatten()
+
+        if other_nonempty:
+            tensor_b[0] = 1
+            tensor_d[0] = 1
+
+        tensors.insert(0, tensor_b)
+        tensors.append(tensor_d)
+
     elif "mat_vecmul" in app_str:
-        pass
+        print("Writing other tensors...")
+        tensorName = args.input_tensor
+        # c(j) uses mode1
+
+        # variant = "mode1"
+        # path = constructOtherVecKey(tensorName, variant)
+        # tensor_c_from_path = FrosttTensor(path)
+        # tensor_c = tensor_c_from_path.load().todense()
+
+        # print("TENSOR SHAPE: ", tensor.shape)
+        # print("TENSOR_C SHAPE: ", tensor_c.shape)
+
+        rows, cols = tensor.shape
+        tensor_c = scipy.sparse.random(cols, 1, data_rvs=np.ones).toarray().flatten()
+
+        if other_nonempty:
+            tensor_c[0] = 1
+
+        tensors.append(tensor_c)
+
+    elif "tensor3_ttv" in app_str:
+        print("Writing other tensors...")
+        size_i, size_j, size_k = tensor.shape  # i,j,k
+        tensor_c = scipy.sparse.random(size_k, 1, data_rvs=np.ones).toarray().flatten()
+
+        if other_nonempty:
+            tensor_c[0] = 1
+
+        tensors.append(tensor_c)
     else:
-        tensor2 = scipy.sparse.random(tensor.shape[0], tensor.shape[1])
-        tensors.append(tensor2)
-        # raise NotImplementedError
+        # tensor2 = scipy.sparse.random(tensor.shape[0], tensor.shape[1])
+        # tensors.append(tensor2)
+        raise NotImplementedError

     return tensors


-def cotile_multilevel_coo(app_str, hw_config_fname, tensors, output_dir_path):
+def cotile_multilevel_coo(app_str, hw_config_fname, tensors, output_dir_path, higher_order=False):
     tensors = get_other_tensors(app_str, tensors[0])

     names, format_permutations, ivars = parse_sam_input(args.cotile)

+    print("cotile_multilevel_coo tensors: ", names, "\n", tensors)
+
+    # import pdb
+    # pdb.set_trace()
+
     sizes_dict = {}
     for i, name in enumerate(names):
         tensor = tensors[i]
@@ -240,7 +441,8 @@
         if cotiled is None:
             # First iteration of tiling
-            cotiled, cotiled_sizes = cotile_coo(names, tensors, format_permutations, ivars, split_map)
+            cotiled, cotiled_sizes = cotile_coo(names, tensors, format_permutations, ivars, split_map,
+                                                higher_order)
         else:
             # recursively tile the blocks
             new_cotiled = {}
@@ -250,9 +452,13 @@
                 new_cotiled[name] = {}
                 new_cotiled_sizes[name] = {}
                 for tile_id, tile in cotiled[name].items():
-                    new_cotiled_temp, new_cotiled_sizes_temp = cotile_coo(name, [tile.tocoo()],
+                    if higher_order:
+                        tile_in_coo = tile.to_coo()
+                    else:
+                        tile_in_coo = tile.tocoo()
+                    new_cotiled_temp, new_cotiled_sizes_temp = cotile_coo(name, [tile_in_coo],
                                                                           [format_permutations[i]], [ivars[i]],
-                                                                          split_map)
+                                                                          split_map, higher_order)

                     for kk, vv in copy.deepcopy(new_cotiled_temp)[name].items():
                         new_tile_id = tuple(list(tile_id) + list(kk))
@@ -272,34 +478,77 @@
             print(exc)


-inputCache = InputCacheSuiteSparse()
+inputCacheSuiteSparse = InputCacheSuiteSparse()
+inputCacheTensor = InputCacheTensor()

 if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description='Tile matrices')
-    parser.add_argument("--input_tensor", type=str, default=None)
-    parser.add_argument("--gen_tensor", action="store_true")
-    parser.add_argument("--cotile", type=str, default=None)
-    parser.add_argument("--output_dir_path", type=str, default="./tiles")
-    parser.add_argument("--hw_config", type=str, default=None)
-    parser.add_argument("--multilevel", action="store_true")
-    parser.add_argument("--input_path", type=str, default=None)
-    parser.add_argument("--extensor", action="store_true")
+    parser = argparse.ArgumentParser(description='script that tiles tensors')
+    parser.add_argument("--tensor_type", choices=['ex', 'gen', 'file', 'ss', 'frostt'],
+                        help='The type of tensor to tile: extensor (ex), generated (gen), \
+                        SuiteSparse (ss), FROSTT (frostt), or input file (file)')
+    parser.add_argument("--higher_order", action="store_true",
+                        help="If \
+                        true then we want to process a
higher-order tensor. With higher-order set to true, if \ + 'tensor_type' is: \ + \n 'gen' then a 3-tensor is generated instead of matrix. \ + \n 'file' then a .tns file is read instead of a .mtx file. \ + \n 'ss' then other matrices used with SuiteSparse are .tns instead of .mtx files. \ + \n 'frostt' should always have 'higher_order' set as true.") + + parser.add_argument("--input_tensor", type=str, default=None, + help="Input tensor NAME if tensor_type is set to 'file'. \ + This is for use with SuiteSparse or FROSTT") + parser.add_argument("--input_path", type=str, default=None, help="Input tensor path") + parser.add_argument("--output_dir_path", type=str, default="./tiles", + help='Output path, directory where tiles get written to') + parser.add_argument("--hw_config", type=str, default=None, + help='Path to the hardware config yaml') + + parser.add_argument("--cotile", type=str, default=None, help='If \ + this is true cotile multiple tensors, else tile one tensor only') + parser.add_argument("--multilevel", action="store_true", help='If \ + multilevel is true there will exist more than one level of tiles, \ + else only tile once') + parser.add_argument("--seed", type=int, default=0, help="Random seed") + parser.add_argument("--other_nonempty", action="store_true", + help="If this is enabled, the 'other' tensors will have at least one nonzero value") args = parser.parse_args() + random.seed(args.seed) + np.random.seed(args.seed) + tensor = None cwd = os.getcwd() - if args.gen_tensor: - tensor = scipy.sparse.random(16, 16) - elif args.extensor: + inputCache = None + + if args.tensor_type == "gen": + if args.higher_order: + tensor = sparse.COO(sparse.random((16, 16, 16))) + else: + tensor = scipy.sparse.random(16, 16) + elif args.tensor_type == "ex": tensor = scipy.io.mmread(args.input_path) - else: + elif args.tensor_type == "ss": assert args.input_tensor is not None - SS_PATH = os.getenv('SUITESPARSE_PATH', default=os.path.join(cwd, 'suitesparse')) - # print("PATH:", SS_PATH) - tensor_path = os.path.join(SS_PATH, args.input_tensor + ".mtx") + + inputCache = inputCacheSuiteSparse + tensor_path = os.path.join(SUITESPARSE_PATH, args.input_tensor + ".mtx") ss_tensor = SuiteSparseTensor(tensor_path) tensor = inputCache.load(ss_tensor, False) + elif args.tensor_type == "frostt": + assert args.input_tensor is not None + assert args.higher_order + + inputCache = inputCacheTensor + tensor_path = os.path.join(FROSTT_PATH, args.input_tensor + ".tns") + + # FIXME: This is broken + frostt_tensor = FrosttTensor(tensor_path) + tensor = inputCache.load(frostt_tensor, False) + + else: + raise ValueError("This choice of 'tensor_type' is unreachable") split_map = {"i": 16, "j": 16, "k": 16} @@ -311,7 +560,6 @@ def cotile_multilevel_coo(app_str, hw_config_fname, tensors, output_dir_path): print("TILES:") print_dict(tiles) else: - output_mtx_name = os.path.join(args.output_dir_path, args.cotile, "mtx") output_mtx_path = Path(output_mtx_name) output_mtx_path.mkdir(parents=True, exist_ok=True) @@ -320,21 +568,43 @@ def cotile_multilevel_coo(app_str, hw_config_fname, tensors, output_dir_path): if args.multilevel: assert args.cotile is not None cotiled_tensors = cotile_multilevel_coo(args.cotile, args.hw_config, [tensor], - os.path.join(args.output_dir_path, args.cotile)) + os.path.join(args.output_dir_path, + args.cotile), + args.higher_order) elif args.cotile is not None: tensor2 = scipy.sparse.random(tensor.shape[0], tensor.shape[1]) names, format_permutations, ivars = parse_sam_input(args.cotile) - 
cotiled_tensors = cotile_coo(names, [tensor, tensor2], format_permutations, ivars, split_map) + cotiled_tensors = cotile_coo(names, [tensor, tensor2], + format_permutations, ivars, split_map, args.higher_order) # print(cotiled_tensors) names = cotiled_tensors.keys() for name in names: for tile_id, tile in cotiled_tensors[name].items(): [str(item) for item in tile_id] - filename = "tensor_" + name + "_tile_" + "_".join([str(item) for item in tile_id]) + ".mtx" + filename = "tensor_" + name + "_tile_" + "_".join([str(item) for item in tile_id]) + # filename += ".tns" if args.higher_order else ".mtx" + filename += ".mtx" mtx_path_name = os.path.join(output_mtx_name, filename) print(tile) - print(mtx_path_name, cwd) - scipy.io.mmwrite(mtx_path_name, tile) - print(os.path.exists(mtx_path_name)) + print("Output path:", mtx_path_name) + + if args.higher_order: + if args.tensor_type == "frostt": + tns_dumper = PydataSparseTensorDumper() + print(tile.shape) + print(tile) + tns_dumper.dump(tile, mtx_path_name) + # FIXME: (owhsu) Why did avb03 add this in? + elif len(tile.shape) == 1: + real_shape = tile.shape[0] + # print(np.array(tile.todense()).reshape(1,-1)) + # scipy.io.mmwrite(mtx_path_name, scipy.sparse.coo_matrix(tile.todense()).reshape((real_shape,1))) + scipy.io.mmwrite(mtx_path_name, scipy.sparse.coo_matrix(tile.todense())) + else: + # print(tile.todense()) + scipy.io.mmwrite(mtx_path_name, scipy.sparse.coo_matrix(tile.todense())) + + else: + scipy.io.mmwrite(mtx_path_name, tile) diff --git a/sam/sim/src/wr_scanner.py b/sam/sim/src/wr_scanner.py index 9de01c2e..e835dc07 100644 --- a/sam/sim/src/wr_scanner.py +++ b/sam/sim/src/wr_scanner.py @@ -164,12 +164,8 @@ def update(self): self.update_ready() if self.done: return - # self.arr.print_debug(name="vals") - # self.seg_arr.print_debug(name="seg") if self.debug: print("RESET WR SCAN ", self.input) - # self.reset() - # self.done = False if self.debug: print("post reset: ", self.arr.out_done()) diff --git a/sam/sim/test/advanced-simulator/test_matmul_ikj_tile_pipeline_final.py b/sam/sim/test/advanced-simulator/test_matmul_ikj_tile_pipeline_final.py index 0d97bf04..19ffe48a 100755 --- a/sam/sim/test/advanced-simulator/test_matmul_ikj_tile_pipeline_final.py +++ b/sam/sim/test/advanced-simulator/test_matmul_ikj_tile_pipeline_final.py @@ -30,7 +30,8 @@ ) @pytest.mark.suitesparse def test_matmul_ikj_tiled_sparse(samBench, ssname, check_gold, debug_sim, report_stats, - skip_empty, yaml_name, nbuffer, backpressure, depth, nnz_value, fill=0): + skip_empty, yaml_name, nbuffer, backpressure, depth, memory_model, nnz_value, + fill=0): depth = int(depth) stats_dict = {"mul_6_ops": 0, "spacc1_3_rmw_ops": [], "out_arr_size": 0, "repsiggen_i_17_total_rep": 0, "repsiggen_j_10_total_rep": 0, "repsiggen_i_17_max_rep": 0, "repsiggen_j_10_max_rep": 0, @@ -340,7 +341,7 @@ def test_matmul_ikj_tiled_sparse(samBench, ssname, check_gold, debug_sim, report in_fifo.append("D") fiberlookup_Bi_19 = CompressedCrdRdScan(name="Bi", crd_arr=B_crd0, seg_arr=B_seg0, debug=debug_sim2, statistics=report_stats, fifo=in_fifo, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) mem_model_bi.valid_tile_received() nxt_tile_present[0] = True if mem_blocks_decl_flag and fiberlookup_Bk_14.out_done() and mem_model_bk.valid_tile() and not nxt_tile_present[1]: @@ -361,7 +362,7 @@ def test_matmul_ikj_tiled_sparse(samBench, ssname, check_gold, debug_sim, report in_fifo = fiberlookup_Bk_14.get_fifo() fiberlookup_Bk_14 = 
CompressedCrdRdScan(name="Bk", crd_arr=B_crd1, seg_arr=B_seg1, debug=debug_sim2, statistics=report_stats, fifo=in_fifo, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) mem_model_bk.valid_tile_received() nxt_tile_present[1] = True if mem_blocks_decl_flag and fiberlookup_Ck_15.out_done() and mem_model_ck.valid_tile() and not nxt_tile_present[2]: @@ -382,7 +383,7 @@ def test_matmul_ikj_tiled_sparse(samBench, ssname, check_gold, debug_sim, report in_fifo = fiberlookup_Ck_15.get_fifo() fiberlookup_Ck_15 = CompressedCrdRdScan(name="Ck", crd_arr=C_crd0, seg_arr=C_seg0, debug=debug_sim2, statistics=report_stats, fifo=in_fifo, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) mem_model_ck.valid_tile_received() repeat_Ci_16.set_in_ref(0, "") repeat_Ci_16.set_in_ref("D", "") @@ -404,7 +405,7 @@ def test_matmul_ikj_tiled_sparse(samBench, ssname, check_gold, debug_sim, report in_fifo = fiberlookup_Cj_12.get_fifo() fiberlookup_Cj_12 = CompressedCrdRdScan(name="Cj", crd_arr=C_crd1, seg_arr=C_seg1, debug=debug_sim2, statistics=report_stats, fifo=in_fifo, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) mem_model_cj.valid_tile_received() nxt_tile_present[3] = True if mem_blocks_decl_flag and arrayvals_B_7.out_done() and mem_model_bvals.valid_tile() and not nxt_tile_present[4]: @@ -422,7 +423,7 @@ def test_matmul_ikj_tiled_sparse(samBench, ssname, check_gold, debug_sim, report B_vals = read_inputs(B_vals_filename, float) in_fifo = arrayvals_B_7.get_fifo() arrayvals_B_7 = Array(name="Bvals", init_arr=B_vals, debug=debug_sim2, statistics=report_stats, fifo=in_fifo, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) mem_model_bvals.valid_tile_received() nxt_tile_present[4] = True if mem_blocks_decl_flag and arrayvals_C_8.out_done() and mem_model_cvals.valid_tile() and not nxt_tile_present[5]: @@ -440,7 +441,7 @@ def test_matmul_ikj_tiled_sparse(samBench, ssname, check_gold, debug_sim, report C_vals = read_inputs(C_vals_filename, float) in_fifo = arrayvals_C_8.get_fifo() arrayvals_C_8 = Array(name="Cvals", init_arr=C_vals, debug=debug_sim2, statistics=report_stats, fifo=in_fifo, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) mem_model_cvals.valid_tile_received() nxt_tile_present[5] = True @@ -613,50 +614,50 @@ def test_matmul_ikj_tiled_sparse(samBench, ssname, check_gold, debug_sim, report debug_sim2 = False # True # False fiberlookup_Bi_19 = CompressedCrdRdScan(name="Bi", crd_arr=B_crd0, seg_arr=B_seg0, debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) fiberlookup_Bk_14 = CompressedCrdRdScan(name="Bk", crd_arr=B_crd1, seg_arr=B_seg1, debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) repsiggen_i_17 = RepeatSigGen(debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) repeat_Ci_16 = Repeat(debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) fiberlookup_Ck_15 = CompressedCrdRdScan(name="Ck", crd_arr=C_crd0, seg_arr=C_seg0, debug=debug_sim2, statistics=report_stats, - 
back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) intersectk_13 = Intersect2(debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) crdhold_5 = CrdHold(debug=debug_sim2, statistics=report_stats, back_en=backpressure, - depth=depth) + depth=depth, memory_model_en=memory_model) fiberlookup_Cj_12 = CompressedCrdRdScan(name="Cj", crd_arr=C_crd1, seg_arr=C_seg1, debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) arrayvals_C_8 = Array(name="C", init_arr=C_vals, debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) crdhold_4 = CrdHold(debug=debug_sim2, statistics=report_stats, back_en=backpressure, depth=depth) repsiggen_j_10 = RepeatSigGen(debug=debug_sim2, statistics=report_stats, back_en=backpressure, - depth=depth) + depth=depth, memory_model_en=memory_model) repeat_Bj_9 = Repeat(debug=debug_sim2, statistics=report_stats, back_en=backpressure, depth=depth) arrayvals_B_7 = Array(name="B", init_arr=B_vals, debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) mul_6 = Multiply2(debug=debug_sim2, statistics=report_stats, back_en=backpressure, depth=depth) spaccumulator1_3 = SparseAccumulator1(debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) spaccumulator1_3_drop_crd_inner = StknDrop(debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) spaccumulator1_3_drop_crd_outer = StknDrop(debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) spaccumulator1_3_drop_val = StknDrop(debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) fiberwrite_Xvals_0 = ValsWrScan(name="vals", size=1 * B_shape[0] * C_shape[1], fill=fill, debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) fiberwrite_X1_1 = CompressWrScan(name="X1", seg_size=B_shape[0] + 1, size=B_shape[0] * C_shape[1], fill=fill, debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) fiberwrite_X0_2 = CompressWrScan(name="X0", seg_size=2, size=B_shape[0], fill=fill, debug=debug_sim2, statistics=report_stats, - back_en=backpressure, depth=depth) + back_en=backpressure, depth=depth, memory_model_en=memory_model) # print("INITIALIZE compute loop at ", time_cnt) initialize_cntr = time_cnt mem_model_b.valid_tile_received() diff --git a/sam/sim/test/conftest.py b/sam/sim/test/conftest.py index b91ab7d3..32b8a75a 100644 --- a/sam/sim/test/conftest.py +++ b/sam/sim/test/conftest.py @@ -25,6 +25,8 @@ def pytest_addoption(parser): help="If nbuffering is enabled") parser.addoption("--back", action="store_true", default=False, help="Whether backpressure is enabled") + parser.addoption("--memory-model", action="store_true", default=False, + help="Whether memory model is wanted") 
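The changes above follow pytest's standard option-to-fixture plumbing: a command-line flag registered in conftest.py is exposed as a fixture, and each test forwards the fixture's value into the simulator primitives' constructors (here as memory_model_en). Below is a minimal, self-contained sketch of that flow; ToyBlock is a hypothetical stand-in for SAM primitives such as CompressedCrdRdScan, not code from this repository.

    # conftest.py (sketch)
    import pytest

    def pytest_addoption(parser):
        parser.addoption("--memory-model", action="store_true", default=False,
                         help="Whether memory model is wanted")

    @pytest.fixture
    def memory_model(request):
        # The fixture name is what tests request as an argument.
        return request.config.getoption("--memory-model")

    # test_toy.py (sketch)
    class ToyBlock:
        """Hypothetical stand-in for a SAM primitive."""
        def __init__(self, memory_model_en=False):
            self.memory_model_en = memory_model_en

    def test_toy_block(memory_model):
        blk = ToyBlock(memory_model_en=memory_model)
        assert isinstance(blk.memory_model_en, bool)

Running `pytest --memory-model` flips the flag to True for every test that requests the fixture; omitting it leaves the default of False.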
parser.addoption("--depth", action="store", default=2, help="fifo depth value") parser.addoption("--nnz-value", action="store", default=5000, @@ -76,6 +78,11 @@ def backpressure(request): return request.config.getoption("--back") +@pytest.fixture +def memory_model(request): + return request.config.getoption("--memory-model") + + @pytest.fixture def skip_empty(request): return request.config.getoption("--skip-empty") @@ -91,11 +98,6 @@ def debug_sim(request): return request.config.getoption("--debug-sim") -@pytest.fixture -def backpressure(request): - return request.config.getoption("--back") - - @pytest.fixture def depth(request): return request.config.getoption("--depth") diff --git a/sam/sim/test/final-apps/test_mat_elemadd_FINAL.py b/sam/sim/test/final-apps/test_mat_elemadd_FINAL.py index 5c0459dd..be82cde9 100644 --- a/sam/sim/test/final-apps/test_mat_elemadd_FINAL.py +++ b/sam/sim/test/final-apps/test_mat_elemadd_FINAL.py @@ -14,11 +14,14 @@ from sam.sim.test.gold import * import os import csv +from sam.sim.test.gen_gantt import gen_gantt cwd = os.getcwd() formatted_dir = os.getenv('SUITESPARSE_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats')) +# csv file path + @pytest.mark.suitesparse def test_mat_elemadd_FINAL(samBench, ssname, cast, positive_only, check_gold, report_stats, debug_sim, backpressure, depth, fill=0): @@ -61,7 +64,8 @@ def test_mat_elemadd_FINAL(samBench, ssname, cast, positive_only, check_gold, re fiberlookup_Ci_11 = CompressedCrdRdScan(crd_arr=C_crd0, seg_arr=C_seg0, debug=debug_sim, statistics=report_stats, back_en=backpressure, depth=int(depth)) unioni_9 = Union2(debug=debug_sim, statistics=report_stats, back_en=backpressure, depth=int(depth)) - fiberwrite_X0_2 = CompressWrScan(seg_size=2, size=2 * len(B_crd0), fill=fill, debug=debug_sim, statistics=report_stats, + fiberwrite_X0_2 = CompressWrScan(seg_size=2, size=2 * len(B_crd0), fill=fill, debug=debug_sim, + statistics=report_stats, back_en=backpressure, depth=int(depth)) fiberlookup_Bj_7 = CompressedCrdRdScan(crd_arr=B_crd1, seg_arr=B_seg1, debug=debug_sim, statistics=report_stats, back_en=backpressure, depth=int(depth)) @@ -70,8 +74,10 @@ def test_mat_elemadd_FINAL(samBench, ssname, cast, positive_only, check_gold, re unionj_6 = Union2(debug=debug_sim, statistics=report_stats, back_en=backpressure, depth=int(depth)) fiberwrite_X1_1 = CompressWrScan(seg_size=2 * len(B_crd0) + 1, size=2 * len(B_vals), fill=fill, debug=debug_sim, statistics=report_stats, back_en=backpressure, depth=int(depth)) - arrayvals_B_4 = Array(init_arr=B_vals, debug=debug_sim, statistics=report_stats, back_en=backpressure, depth=int(depth)) - arrayvals_C_5 = Array(init_arr=C_vals, debug=debug_sim, statistics=report_stats, back_en=backpressure, depth=int(depth)) + arrayvals_B_4 = Array(init_arr=B_vals, debug=debug_sim, statistics=report_stats, back_en=backpressure, + depth=int(depth)) + arrayvals_C_5 = Array(init_arr=C_vals, debug=debug_sim, statistics=report_stats, back_en=backpressure, + depth=int(depth)) add_3 = Add2(debug=debug_sim, statistics=report_stats, back_en=backpressure, depth=int(depth)) fiberwrite_Xvals_0 = ValsWrScan(size=2 * len(B_vals), fill=fill, debug=debug_sim, statistics=report_stats, back_en=backpressure, depth=int(depth)) @@ -147,18 +153,34 @@ def bench(): extra_info["result/vals_size"] = len(out_vals) extra_info["result/nnz"] = len([x for x in out_vals if x != 0]) - sample_dict = unioni_9.return_statistics() + sample_dict = fiberlookup_Bi_10.return_statistics() for k in sample_dict.keys(): - 
extra_info["unioni_9" + "/" + k] = sample_dict[k] + extra_info["fiberlookup_Bi_10" + "/" + k] = sample_dict[k] - sample_dict = unionj_6.return_statistics() + sample_dict = fiberlookup_Ci_11.return_statistics() for k in sample_dict.keys(): - extra_info["unionj_6" + "/" + k] = sample_dict[k] + extra_info["fiberlookup_Ci_11" + "/" + k] = sample_dict[k] + + sample_dict = unioni_9.return_statistics() + for k in sample_dict.keys(): + extra_info["unioni_9" + "/" + k] = sample_dict[k] sample_dict = fiberwrite_X0_2.return_statistics() for k in sample_dict.keys(): extra_info["fiberwrite_X0_2" + "/" + k] = sample_dict[k] + sample_dict = fiberlookup_Bj_7.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberlookup_Bj_7" + "/" + k] = sample_dict[k] + + sample_dict = fiberlookup_Cj_8.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberlookup_Cj_8" + "/" + k] = sample_dict[k] + + sample_dict = unionj_6.return_statistics() + for k in sample_dict.keys(): + extra_info["unionj_6" + "/" + k] = sample_dict[k] + sample_dict = fiberwrite_X1_1.return_statistics() for k in sample_dict.keys(): extra_info["fiberwrite_X1_1" + "/" + k] = sample_dict[k] @@ -167,10 +189,6 @@ def bench(): for k in sample_dict.keys(): extra_info["arrayvals_B_4" + "/" + k] = sample_dict[k] - sample_dict = fiberwrite_Xvals_0.return_statistics() - for k in sample_dict.keys(): - extra_info["fiberwrite_Xvals_0" + "/" + k] = sample_dict[k] - sample_dict = arrayvals_C_5.return_statistics() for k in sample_dict.keys(): extra_info["arrayvals_C_5" + "/" + k] = sample_dict[k] @@ -179,21 +197,14 @@ def bench(): for k in sample_dict.keys(): extra_info["add_3" + "/" + k] = sample_dict[k] - sample_dict = fiberlookup_Bi_10.return_statistics() - for k in sample_dict.keys(): - extra_info["fiberlookup_Bi_10" + "/" + k] = sample_dict[k] - - sample_dict = fiberlookup_Ci_11.return_statistics() - for k in sample_dict.keys(): - extra_info["fiberlookup_Ci_11" + "/" + k] = sample_dict[k] - - sample_dict = fiberlookup_Bj_7.return_statistics() + sample_dict = fiberwrite_Xvals_0.return_statistics() for k in sample_dict.keys(): - extra_info["fiberlookup_Bj_7" + "/" + k] = sample_dict[k] + extra_info["fiberwrite_Xvals_0" + "/" + k] = sample_dict[k] - sample_dict = fiberlookup_Cj_8.return_statistics() - for k in sample_dict.keys(): - extra_info["fiberlookup_Cj_8" + "/" + k] = sample_dict[k] + # code for generating csv, gantt chart, txt file + extra_info["backpressure"] = backpressure + extra_info["depth"] = depth + gen_gantt(extra_info, "mat_elemadd") if check_gold: print("Checking gold...") diff --git a/sam/sim/test/final-apps/test_matmul_kij_FINAL.py b/sam/sim/test/final-apps/test_matmul_kij_FINAL.py index 16cc48c4..a81170ef 100644 --- a/sam/sim/test/final-apps/test_matmul_kij_FINAL.py +++ b/sam/sim/test/final-apps/test_matmul_kij_FINAL.py @@ -14,6 +14,7 @@ from sam.sim.test.gold import * import os import csv +from sam.sim.test.gen_gantt import gen_gantt cwd = os.getcwd() formatted_dir = os.getenv('SUITESPARSE_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats')) @@ -241,6 +242,8 @@ def bench(): for k in sample_dict.keys(): extra_info["fiberlookup_Cj_11" + "/" + k] = sample_dict[k] + gen_gantt(extra_info, "matmul_kij") + if check_gold: print("Checking gold...") check_gold_matmul(ssname, debug_sim, cast, positive_only, out_crds, out_segs, out_vals, "ss01") diff --git a/sam/sim/test/gen_gantt.py b/sam/sim/test/gen_gantt.py new file mode 100644 index 00000000..453a0553 --- /dev/null +++ b/sam/sim/test/gen_gantt.py @@ -0,0 +1,61 @@ 
+import matplotlib.pyplot as plt +import csv + + +def gen_gantt(extra_info, testname): + block_list = [] + start_list = [] + finish_list = [] + duration_list = [] + + start_c = '' + finish_c = '' + sam_name = '' + + for k in extra_info.keys(): + if "done_cycles" in k: + sam_name = k.split('/')[0] + finish_c = extra_info[k] + if not isinstance(finish_c, int): + finish_list.insert(0, int(finish_c)) + else: + finish_list.insert(0, finish_c) + elif ("start_cycle" in k) and (sam_name in k.split('/')[0]): + ''' + We assume that the info in extra_info is added + in the same order each block is updated. + If the opposite order is assumed, use the append function + instead of insert. + (e.g.) block_list.insert(0, sam_name) -> block_list.append(sam_name) + ''' + block_list.insert(0, sam_name) + start_c = extra_info[k] + if not isinstance(start_c, int): + start_list.insert(0, int(start_c)) + duration_list.insert(0, finish_c - int(start_c)) + else: + start_list.insert(0, start_c) + duration_list.insert(0, finish_c - start_c) + + back_depth = 'N' # assume there is no backpressure by default + if "backpressure" in extra_info.keys() and extra_info["backpressure"]: + back_depth = str(extra_info["depth"]) # depth may be an int (default) or a str (from the CLI) + + # Write cycle info to a csv file + with open(testname + '_' + extra_info["dataset"] + '_back_' + back_depth + '.csv', 'w', newline='') as file: + writer = csv.writer(file) + writer.writerow(["block", "start", "finish", "duration", "valid_ops"]) + for idx, block in reversed(list(enumerate(block_list))): + writer.writerow([block, start_list[idx], finish_list[idx], duration_list[idx], '-']) + + # Print all the statistics to a text file + text_file = open(testname + '_' + extra_info["dataset"] + '_back_' + back_depth + ".txt", "w") + for k in extra_info.keys(): + if "/" in k: + text_file.write(k + ": " + str(extra_info[k]) + "\n") + text_file.close() + + # Create the gantt chart + plt.barh(y=block_list, width=duration_list, left=start_list) + file_name = testname + '_' + extra_info["dataset"] + "_back_" + back_depth + ".png" + plt.savefig(file_name, bbox_inches="tight") diff --git a/sam/sim/test/reorder-study/test_reorder_matmul_ikj.py b/sam/sim/test/reorder-study/test_reorder_matmul_ikj.py index af03c87a..80347a06 100644 --- a/sam/sim/test/reorder-study/test_reorder_matmul_ikj.py +++ b/sam/sim/test/reorder-study/test_reorder_matmul_ikj.py @@ -16,9 +16,11 @@ import os import csv import numpy +from sam.sim.test.gen_gantt import gen_gantt + cwd = os.getcwd() formatted_dir = os.getenv('SUITESPARSE_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats')) -formatted_dir = os.getenv('FROSTT_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats')) +# formatted_dir = os.getenv('FROSTT_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats')) synthetic_dir = os.getenv('SYNTHETIC_PATH', default=os.path.join(cwd, 'synthetic')) @@ -29,11 +31,9 @@ reason='CI lacks datasets', ) @pytest.mark.synth -@pytest.mark.parametrize("sparsity", [0.95]) -def test_reorder_matmul_ikj(samBench, sparsity, check_gold, debug_sim, backpressure, depth, fill=0): - - # DCSR - B_dirname = os.path.join(synthetic_dir, f"matrix/DCSR/B_random_sp_{sparsity}/") +# @pytest.mark.parametrize("sparsity", [0.95]) +def test_reorder_matmul_ikj(samBench, ssname, check_gold, report_stats, debug_sim, cast, backpressure, depth, fill=0): + B_dirname = os.path.join(formatted_dir, ssname, "matmul_kij") B_shape_filename = os.path.join(B_dirname, "tensor_B_mode_shape") B_shape = read_inputs(B_shape_filename) @@ -51,7 +51,7 @@ def test_reorder_matmul_ikj(samBench,
sparsity, check_gold, debug_sim, backpress B_vals = read_inputs(B_vals_filename, float) # DCSR - C_dirname = os.path.join(synthetic_dir, f"matrix/DCSR/C_random_sp_{sparsity}/") + C_dirname = B_dirname C_shape_filename = os.path.join(C_dirname, "tensor_C_mode_shape") C_shape = read_inputs(C_shape_filename) @@ -68,6 +68,10 @@ def test_reorder_matmul_ikj(samBench, sparsity, check_gold, debug_sim, backpress C_vals_filename = os.path.join(C_dirname, "tensor_C_mode_vals") C_vals = read_inputs(C_vals_filename, float) + # THIS IS FOR SIZE INFO + Bs_dirname = B_dirname + Bs_seg = read_inputs(os.path.join(Bs_dirname, "tensor_B_mode_0_seg")) + # B_dirname = os.path.join(formatted_dir, ssname, "orig", "ss01") # B_shape_filename = os.path.join(B_dirname, "B_shape.txt") # B_shape = read_inputs(B_shape_filename) @@ -227,41 +231,43 @@ def bench(): extra_info["cycles"] = time_cnt extra_info["tensor_B_shape"] = B_shape extra_info["tensor_C_shape"] = C_shape - # sample_dict = spaccumulator1_3.return_statistics() - # for k in sample_dict.keys(): - # extra_info["spaccumulator1_3" + "_" + k] = sample_dict[k] + sample_dict = spaccumulator1_3.return_statistics() + for k in sample_dict.keys(): + extra_info["spaccumulator1_3" + "_" + k] = sample_dict[k] + + sample_dict = fiberwrite_Xvals_0.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_Xvals_0" + "_" + k] = sample_dict[k] - # sample_dict = fiberwrite_Xvals_0.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_Xvals_0" + "_" + k] = sample_dict[k] + sample_dict = fiberwrite_X1_1.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_X1_1" + "_" + k] = sample_dict[k] - # sample_dict = fiberwrite_X1_1.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_X1_1" + "_" + k] = sample_dict[k] + sample_dict = fiberwrite_X0_2.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_X0_2" + "_" + k] = sample_dict[k] - # sample_dict = fiberwrite_X0_2.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_X0_2" + "_" + k] = sample_dict[k] + sample_dict = repeat_Ci_16.return_statistics() + for k in sample_dict.keys(): + extra_info["repeat_Ci_16" + "_" + k] = sample_dict[k] - # sample_dict = repeat_Ci_16.return_statistics() - # for k in sample_dict.keys(): - # extra_info["repeat_Ci_16" + "_" + k] = sample_dict[k] + sample_dict = intersectk_13.return_statistics() + for k in sample_dict.keys(): + extra_info["intersectk_13" + "_" + k] = sample_dict[k] - # sample_dict = intersectk_13.return_statistics() - # for k in sample_dict.keys(): - # extra_info["intersectk_13" + "_" + k] = sample_dict[k] + sample_dict = repeat_Bj_9.return_statistics() + for k in sample_dict.keys(): + extra_info["repeat_Bj_9" + "_" + k] = sample_dict[k] - # sample_dict = repeat_Bj_9.return_statistics() - # for k in sample_dict.keys(): - # extra_info["repeat_Bj_9" + "_" + k] = sample_dict[k] + sample_dict = arrayvals_B_7.return_statistics() + for k in sample_dict.keys(): + extra_info["arrayvals_B_7" + "_" + k] = sample_dict[k] - # sample_dict = arrayvals_B_7.return_statistics() - # for k in sample_dict.keys(): - # extra_info["arrayvals_B_7" + "_" + k] = sample_dict[k] + sample_dict = arrayvals_C_8.return_statistics() + for k in sample_dict.keys(): + extra_info["arrayvals_C_8" + "_" + k] = sample_dict[k] - # sample_dict = arrayvals_C_8.return_statistics() - # for k in sample_dict.keys(): - # extra_info["arrayvals_C_8" + "_" + k] = sample_dict[k] + gen_gantt(extra_info, 
"matmul_ikj") if check_gold: print("Checking gold...") diff --git a/sam/sim/test/reorder-study/test_reorder_matmul_jik.py b/sam/sim/test/reorder-study/test_reorder_matmul_jik.py index 0b7e4388..6afbe6df 100644 --- a/sam/sim/test/reorder-study/test_reorder_matmul_jik.py +++ b/sam/sim/test/reorder-study/test_reorder_matmul_jik.py @@ -16,6 +16,8 @@ import csv from sam.onyx.generate_matrices import create_matrix_from_point_list, get_tensor_from_files import numpy +from sam.sim.test.gen_gantt import gen_gantt + cwd = os.getcwd() formatted_dir = os.getenv('SUITESPARSE_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats')) formatted_dir = os.getenv('FROSTT_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats')) @@ -212,41 +214,43 @@ def bench(): extra_info["cycles"] = time_cnt extra_info["tensor_B_shape"] = B_shape extra_info["tensor_C_shape"] = C_shape - # sample_dict = fiberwrite_X1_2.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_X1_2" + "_" + k] = sample_dict[k] + sample_dict = fiberwrite_X1_2.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_X1_2" + "_" + k] = sample_dict[k] + + sample_dict = repeat_Bj_14.return_statistics() + for k in sample_dict.keys(): + extra_info["repeat_Bj_14" + "_" + k] = sample_dict[k] - # sample_dict = repeat_Bj_14.return_statistics() - # for k in sample_dict.keys(): - # extra_info["repeat_Bj_14" + "_" + k] = sample_dict[k] + sample_dict = fiberwrite_X0_1.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_X0_1" + "_" + k] = sample_dict[k] - # sample_dict = fiberwrite_X0_1.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_X0_1" + "_" + k] = sample_dict[k] + sample_dict = repeat_Ci_10.return_statistics() + for k in sample_dict.keys(): + extra_info["repeat_Ci_10" + "_" + k] = sample_dict[k] - # sample_dict = repeat_Ci_10.return_statistics() - # for k in sample_dict.keys(): - # extra_info["repeat_Ci_10" + "_" + k] = sample_dict[k] + sample_dict = intersectk_7.return_statistics() + for k in sample_dict.keys(): + extra_info["intersectk_7" + "_" + k] = sample_dict[k] - # sample_dict = intersectk_7.return_statistics() - # for k in sample_dict.keys(): - # extra_info["intersectk_7" + "_" + k] = sample_dict[k] + sample_dict = arrayvals_B_5.return_statistics() + for k in sample_dict.keys(): + extra_info["arrayvals_B_5" + "_" + k] = sample_dict[k] - # sample_dict = arrayvals_B_5.return_statistics() - # for k in sample_dict.keys(): - # extra_info["arrayvals_B_5" + "_" + k] = sample_dict[k] + sample_dict = reduce_3.return_statistics() + for k in sample_dict.keys(): + extra_info["reduce_3" + "_" + k] = sample_dict[k] - # sample_dict = reduce_3.return_statistics() - # for k in sample_dict.keys(): - # extra_info["reduce_3" + "_" + k] = sample_dict[k] + sample_dict = fiberwrite_Xvals_0.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_Xvals_0" + "_" + k] = sample_dict[k] - # sample_dict = fiberwrite_Xvals_0.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_Xvals_0" + "_" + k] = sample_dict[k] + sample_dict = arrayvals_C_6.return_statistics() + for k in sample_dict.keys(): + extra_info["arrayvals_C_6" + "_" + k] = sample_dict[k] - # sample_dict = arrayvals_C_6.return_statistics() - # for k in sample_dict.keys(): - # extra_info["arrayvals_C_6" + "_" + k] = sample_dict[k] + gen_gantt(extra_info, "matmul_jik") if check_gold: print("Checking gold...") diff --git 
a/sam/sim/test/reorder-study/test_reorder_matmul_jki.py b/sam/sim/test/reorder-study/test_reorder_matmul_jki.py index 6511ddc6..8e12287d 100644 --- a/sam/sim/test/reorder-study/test_reorder_matmul_jki.py +++ b/sam/sim/test/reorder-study/test_reorder_matmul_jki.py @@ -16,6 +16,8 @@ import csv from sam.onyx.generate_matrices import create_matrix_from_point_list, get_tensor_from_files import numpy +from sam.sim.test.gen_gantt import gen_gantt + cwd = os.getcwd() formatted_dir = os.getenv('SUITESPARSE_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats')) formatted_dir = os.getenv('FROSTT_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats')) @@ -234,41 +236,43 @@ def bench(): extra_info["cycles"] = time_cnt extra_info["tensor_B_shape"] = B_shape extra_info["tensor_C_shape"] = C_shape - # sample_dict = spaccumulator1_3.return_statistics() - # for k in sample_dict.keys(): - # extra_info["spaccumulator1_3" + "_" + k] = sample_dict[k] + sample_dict = spaccumulator1_3.return_statistics() + for k in sample_dict.keys(): + extra_info["spaccumulator1_3" + "_" + k] = sample_dict[k] + + sample_dict = fiberwrite_Xvals_0.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_Xvals_0" + "_" + k] = sample_dict[k] - # sample_dict = fiberwrite_Xvals_0.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_Xvals_0" + "_" + k] = sample_dict[k] + sample_dict = fiberwrite_X0_1.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_X0_1" + "_" + k] = sample_dict[k] - # sample_dict = fiberwrite_X0_1.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_X0_1" + "_" + k] = sample_dict[k] + sample_dict = fiberwrite_X1_2.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_X1_2" + "_" + k] = sample_dict[k] - # sample_dict = fiberwrite_X1_2.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_X1_2" + "_" + k] = sample_dict[k] + sample_dict = repeat_Bj_16.return_statistics() + for k in sample_dict.keys(): + extra_info["repeat_Bj_16" + "_" + k] = sample_dict[k] - # sample_dict = repeat_Bj_16.return_statistics() - # for k in sample_dict.keys(): - # extra_info["repeat_Bj_16" + "_" + k] = sample_dict[k] + sample_dict = intersectk_13.return_statistics() + for k in sample_dict.keys(): + extra_info["intersectk_13" + "_" + k] = sample_dict[k] - # sample_dict = intersectk_13.return_statistics() - # for k in sample_dict.keys(): - # extra_info["intersectk_13" + "_" + k] = sample_dict[k] + sample_dict = repeat_Ci_9.return_statistics() + for k in sample_dict.keys(): + extra_info["repeat_Ci_9" + "_" + k] = sample_dict[k] - # sample_dict = repeat_Ci_9.return_statistics() - # for k in sample_dict.keys(): - # extra_info["repeat_Ci_9" + "_" + k] = sample_dict[k] + sample_dict = arrayvals_C_8.return_statistics() + for k in sample_dict.keys(): + extra_info["arrayvals_C_8" + "_" + k] = sample_dict[k] - # sample_dict = arrayvals_C_8.return_statistics() - # for k in sample_dict.keys(): - # extra_info["arrayvals_C_8" + "_" + k] = sample_dict[k] + sample_dict = arrayvals_B_7.return_statistics() + for k in sample_dict.keys(): + extra_info["arrayvals_B_7" + "_" + k] = sample_dict[k] - # sample_dict = arrayvals_B_7.return_statistics() - # for k in sample_dict.keys(): - # extra_info["arrayvals_B_7" + "_" + k] = sample_dict[k] + gen_gantt(extra_info, "matmul_jki") if check_gold: print("Checking gold...") diff --git a/sam/sim/test/reorder-study/test_reorder_matmul_kij.py 
b/sam/sim/test/reorder-study/test_reorder_matmul_kij.py index c16f522a..8ea0851c 100644 --- a/sam/sim/test/reorder-study/test_reorder_matmul_kij.py +++ b/sam/sim/test/reorder-study/test_reorder_matmul_kij.py @@ -16,6 +16,8 @@ import csv from sam.onyx.generate_matrices import create_matrix_from_point_list, get_tensor_from_files import numpy +from sam.sim.test.gen_gantt import gen_gantt + cwd = os.getcwd() formatted_dir = os.getenv('SUITESPARSE_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats')) formatted_dir = os.getenv('FROSTT_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats')) @@ -230,41 +232,43 @@ def bench(): extra_info["cycles"] = time_cnt extra_info["tensor_B_shape"] = B_shape extra_info["tensor_C_shape"] = C_shape - # sample_dict = intersectk_16.return_statistics() - # for k in sample_dict.keys(): - # extra_info["intersectk_16" + "_" + k] = sample_dict[k] + sample_dict = intersectk_16.return_statistics() + for k in sample_dict.keys(): + extra_info["intersectk_16" + "_" + k] = sample_dict[k] + + sample_dict = spaccumulator2_3.return_statistics() + for k in sample_dict.keys(): + extra_info["spaccumulator2_3" + "_" + k] = sample_dict[k] - # sample_dict = spaccumulator2_3.return_statistics() - # for k in sample_dict.keys(): - # extra_info["spaccumulator2_3" + "_" + k] = sample_dict[k] + sample_dict = fiberwrite_Xvals_0.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_Xvals_0" + "_" + k] = sample_dict[k] - # sample_dict = fiberwrite_Xvals_0.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_Xvals_0" + "_" + k] = sample_dict[k] + sample_dict = fiberwrite_X1_1.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_X1_1" + "_" + k] = sample_dict[k] - # sample_dict = fiberwrite_X1_1.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_X1_1" + "_" + k] = sample_dict[k] + sample_dict = fiberwrite_X0_2.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_X0_2" + "_" + k] = sample_dict[k] - # sample_dict = fiberwrite_X0_2.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_X0_2" + "_" + k] = sample_dict[k] + sample_dict = repeat_Ci_12.return_statistics() + for k in sample_dict.keys(): + extra_info["repeat_Ci_12" + "_" + k] = sample_dict[k] - # sample_dict = repeat_Ci_12.return_statistics() - # for k in sample_dict.keys(): - # extra_info["repeat_Ci_12" + "_" + k] = sample_dict[k] + sample_dict = repeat_Bj_8.return_statistics() + for k in sample_dict.keys(): + extra_info["repeat_Bj_8" + "_" + k] = sample_dict[k] - # sample_dict = repeat_Bj_8.return_statistics() - # for k in sample_dict.keys(): - # extra_info["repeat_Bj_8" + "_" + k] = sample_dict[k] + sample_dict = arrayvals_B_6.return_statistics() + for k in sample_dict.keys(): + extra_info["arrayvals_B_6" + "_" + k] = sample_dict[k] - # sample_dict = arrayvals_B_6.return_statistics() - # for k in sample_dict.keys(): - # extra_info["arrayvals_B_6" + "_" + k] = sample_dict[k] + sample_dict = arrayvals_C_7.return_statistics() + for k in sample_dict.keys(): + extra_info["arrayvals_C_7" + "_" + k] = sample_dict[k] - # sample_dict = arrayvals_C_7.return_statistics() - # for k in sample_dict.keys(): - # extra_info["arrayvals_C_7" + "_" + k] = sample_dict[k] + gen_gantt(extra_info, "matmul_kij") if check_gold: print("Checking gold...") diff --git a/sam/sim/test/reorder-study/test_reorder_matmul_kji.py b/sam/sim/test/reorder-study/test_reorder_matmul_kji.py index 184ca2c4..60284171 
100644 --- a/sam/sim/test/reorder-study/test_reorder_matmul_kji.py +++ b/sam/sim/test/reorder-study/test_reorder_matmul_kji.py @@ -16,6 +16,8 @@ import csv from sam.onyx.generate_matrices import create_matrix_from_point_list, get_tensor_from_files import numpy +from sam.sim.test.gen_gantt import gen_gantt + cwd = os.getcwd() formatted_dir = os.getenv('SUITESPARSE_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats')) formatted_dir = os.getenv('FROSTT_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats')) @@ -31,7 +33,6 @@ @pytest.mark.synth @pytest.mark.parametrize("sparsity", [0.95]) def test_reorder_matmul_kji(samBench, sparsity, check_gold, debug_sim, backpressure, depth, fill=0): - # DCSC B_dirname = os.path.join(synthetic_dir, f"matrix/DCSC/B_random_sp_{sparsity}/") B_shape_filename = os.path.join(B_dirname, "tensor_B_mode_shape") @@ -230,41 +231,44 @@ def bench(): extra_info["cycles"] = time_cnt extra_info["tensor_B_shape"] = B_shape extra_info["tensor_C_shape"] = C_shape - # sample_dict = intersectk_16.return_statistics() - # for k in sample_dict.keys(): - # extra_info["intersectk_16" + "_" + k] = sample_dict[k] - # sample_dict = repeat_Bj_12.return_statistics() - # for k in sample_dict.keys(): - # extra_info["repeat_Bj_12" + "_" + k] = sample_dict[k] + sample_dict = intersectk_16.return_statistics() + for k in sample_dict.keys(): + extra_info["intersectk_16" + "_" + k] = sample_dict[k] + + sample_dict = repeat_Bj_12.return_statistics() + for k in sample_dict.keys(): + extra_info["repeat_Bj_12" + "_" + k] = sample_dict[k] + + sample_dict = spaccumulator2_3.return_statistics() + for k in sample_dict.keys(): + extra_info["spaccumulator2_3" + "_" + k] = sample_dict[k] - # sample_dict = spaccumulator2_3.return_statistics() - # for k in sample_dict.keys(): - # extra_info["spaccumulator2_3" + "_" + k] = sample_dict[k] + sample_dict = fiberwrite_Xvals_0.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_Xvals_0" + "_" + k] = sample_dict[k] - # sample_dict = fiberwrite_Xvals_0.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_Xvals_0" + "_" + k] = sample_dict[k] + sample_dict = fiberwrite_X0_1.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_X0_1" + "_" + k] = sample_dict[k] - # sample_dict = fiberwrite_X0_1.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_X0_1" + "_" + k] = sample_dict[k] + sample_dict = fiberwrite_X1_2.return_statistics() + for k in sample_dict.keys(): + extra_info["fiberwrite_X1_2" + "_" + k] = sample_dict[k] - # sample_dict = fiberwrite_X1_2.return_statistics() - # for k in sample_dict.keys(): - # extra_info["fiberwrite_X1_2" + "_" + k] = sample_dict[k] + sample_dict = repeat_Ci_8.return_statistics() + for k in sample_dict.keys(): + extra_info["repeat_Ci_8" + "_" + k] = sample_dict[k] - # sample_dict = repeat_Ci_8.return_statistics() - # for k in sample_dict.keys(): - # extra_info["repeat_Ci_8" + "_" + k] = sample_dict[k] + sample_dict = arrayvals_C_7.return_statistics() + for k in sample_dict.keys(): + extra_info["arrayvals_C_7" + "_" + k] = sample_dict[k] - # sample_dict = arrayvals_C_7.return_statistics() - # for k in sample_dict.keys(): - # extra_info["arrayvals_C_7" + "_" + k] = sample_dict[k] + sample_dict = arrayvals_B_6.return_statistics() + for k in sample_dict.keys(): + extra_info["arrayvals_B_6" + "_" + k] = sample_dict[k] - # sample_dict = arrayvals_B_6.return_statistics() - # for k in sample_dict.keys(): - # extra_info["arrayvals_B_6" + 
"_" + k] = sample_dict[k] + gen_gantt(extra_info, "matmul_kji") if check_gold: print("Checking gold...") diff --git a/sam/util.py b/sam/util.py index cae31b8a..ae70dee1 100644 --- a/sam/util.py +++ b/sam/util.py @@ -1,24 +1,22 @@ -import scipy.sparse -import scipy.io -import os import glob -import numpy import itertools -import shutil -import numpy as np import math import sparse - from pathlib import Path +import os +import shutil from dataclasses import dataclass +from pathlib import Path -import os -import math import numpy +import numpy as np +import scipy.io +import scipy.sparse +import sparse # All environment variables for SAM should live here or in make file cwd = os.getcwd() -SAM_HOME = os.getenv('HOSTNAME', default=cwd) +SAM_HOME = os.getenv('SAM_HOME', default=cwd) HOSTNAME = os.getenv('HOSTNAME', default="local") SUITESPARSE_PATH = os.getenv('SUITESPARSE_PATH', default=os.path.join(SAM_HOME, "data", "suitesparse")) SUITESPARSE_FORMATTED_PATH = os.getenv('SUITESPARSE_FORMATTED_PATH', default=os.path.join(SAM_HOME, "data", @@ -39,6 +37,13 @@ def safeCastScipyTensorToInts(tensor): data[i] = round_sparse(tensor.data[i]) return scipy.sparse.coo_matrix(tensor.coords, data, tensor.shape) +def constructOtherVecKey(tensorName, variant, sparsity=0.001): + path = os.getenv('TACO_TENSOR_PATH') + return f"{path}/{tensorName}-vec_{variant}-{sparsity}.tns" + +def constructOtherMatKey(tensorName, variant, sparsity=0.001): + path = os.getenv('TACO_TENSOR_PATH') + return f"{path}/../suitesparse/{tensorName}_{variant}.mtx" # ScipyTensorShifter shifts all elements in the last mode # of the input scipy/sparse tensor by one. @@ -141,23 +146,26 @@ def load(self, path): assert False -# PydataSparseTensorLoader loads a sparse tensor from a file into # a pydata.sparse tensor. -# class PydataSparseTensorLoader: -# def __init__(self): -# self.loader = TnsFileLoader() -# -# def load(self, path): -# dims, coords, values = self.loader.load(path) -# return sparse.COO(coords, values, tuple(dims)) -# -# # PydataSparseTensorDumper dumps a sparse tensor to a the desired file. -# class PydataSparseTensorDumper: -# def __init__(self): -# self.dumper = TnsFileDumper() -# -# def dump(self, tensor, path): -# self.dumper.dump_dict_to_file(tensor.shape, sparse.DOK(tensor).data, path) +class PydataSparseTensorLoader: + def __init__(self): + self.loader = TnsFileLoader() + + def load(self, path): + dims, coords, values = self.loader.load(path) + return sparse.COO(coords, values, tuple(dims)) + + +# PydataSparseTensorDumper dumps a sparse tensor to a the desired file. 
+class PydataSparseTensorDumper: + def __init__(self): + self.dumper = TnsFileDumper() + + def dump(self, tensor, path): + assert isinstance(tensor, sparse.DOK), "The tensor needs to be in pydata/sparse DOK format" + self.dumper.dump_dict_to_file(tensor.shape, tensor.data, path) + + # # # @@ -207,12 +215,13 @@ def shiftLastMode(self, tensor): @dataclass class DoublyCompressedMatrix: - shape: (int) - seg0: [int] - crd0: [int] - seg1: [int] - crd1: [int] - data: [float] + # Annotations (not assignments) are required for @dataclass to generate fields. + shape: tuple + seg0: list + crd0: list + seg1: list + crd1: list + data: list # ScipyMatrixMarketTensorLoader loads tensors in the matrix market format @@ -533,7 +542,7 @@ def __init__(self): self.lastName = None self.tensor = None - def load(self, tensor, suiteSparse, cast, format_str): + def load(self, tensor, cast): if self.lastName == str(tensor): return self.tensor else: @@ -546,8 +555,22 @@ def load(self, tensor, suiteSparse, cast, format_str): return self.tensor +# FrosttTensor represents a tensor in the FROSTT dataset. +class FrosttTensor: + def __init__(self, path): + self.path = path + self.__name__ = self.__str__() + + def __str__(self): + f = os.path.split(self.path)[1] + return f.replace(".tns", "") + + def load(self): + return PydataSparseTensorLoader().load(self.path) + + # PydataMatrixMarketTensorLoader loads tensors in the matrix market format -# into pydata.sparse matrices. +# into sparse matrices. # class PydataMatrixMarketTensorLoader: # def __init__(self): # pass diff --git a/scripts/README.md b/scripts/README.md index b004db54..1207da54 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -1,4 +1,21 @@ ----------------------------------- -| File Name | Usage | Description | ------------------------------------ -| +# SAM Scripts + +| Folder Name | Description | +| ------------ | ----------------------- | +| `artifact/` | Scripts used solely for The Sparse Abstract Machine ASPLOS 2023 AE | +| `formatting/` | Scripts used to format tensors from .mtx or .tns to other compressed formats | +| `gen_sam_apps/` | Scripts used to generate SAM apps from the compiler | +| `get_data/` | Scripts used to download and unpack datasets | +| `logs/` | Saved log files | +| `run_cpu/` | Scripts used to run the CPU baseline code | +| `run_onyx/` | Scripts used to run Onyx simulations | +| `run_sam_sim/` | Scripts used to run SAM simulations via Pytest | +| `stats/` | Scripts used to compute statistics on data (for DSE/development) | +| `tensor_names` | Scripts and files that list all the dataset tensor names | +| `tiling/` | Scripts used to generate, run, and check tiled simulations | +| `util/` | Utility code used by scripts under the `scripts/` directory | + + +Each folder has a README describing the scripts in +that directory. Also, for each individual script, see the first-line comment for how +to run it.
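As a quick illustration of the loader/dumper pair and the FrosttTensor class that sam/util.py now exposes, here is a minimal usage sketch. The .tns paths are hypothetical, and it assumes TnsFileLoader/TnsFileDumper are available as elsewhere in the repository.

    import sparse
    from sam.util import FrosttTensor, PydataSparseTensorDumper

    # FrosttTensor wraps a .tns path; load() returns a pydata/sparse COO tensor.
    tns = FrosttTensor("/path/to/example.tns")  # hypothetical input file
    coo = tns.load()
    print(str(tns), coo.shape)  # __str__ strips the .tns suffix

    # dump() asserts its input is in DOK format, so convert before writing.
    dok = sparse.DOK(coo)
    PydataSparseTensorDumper().dump(dok, "/tmp/example_copy.tns")  # hypothetical output path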
diff --git a/scripts/advanced_simulator_runner.sh b/scripts/advanced_simulator_runner.sh deleted file mode 100755 index 397342fc..00000000 --- a/scripts/advanced_simulator_runner.sh +++ /dev/null @@ -1,149 +0,0 @@ -#!/bin/bash -#SBATCH -N 1 -#SBATCH --mem 120000 -#SBATCH -p lanka-v3 -#SBATCH --exclusive - -set -u - -BENCHMARKS=( -## mat_vecmul_FINAL -# matmul_FINAL -# mat_identity -# mat_identity_back -# matmul_ikj_memory_back -# matmul_ikj_sparse_tiling2 -# matmul_ikj_glb_tile -# matmul_ikj_glb_tile2 -matmul_ikj_tile_pipeline_final -# matmul_ikj_glb_tile_pipeline -# i matmul_ikj_glb_no_pipe -# matmul_ikj_input_only -# matmul_ikj_tiled_bcsstm02 -# matmul_ikj_check -# matmul_ikj_tiling -# matmul_ikj_back -# mat_elemmul_FINAL -# mat_elemadd_FINAL -# mat_elemadd3_FINAL -# mat_residual_FINAL -# mat_mattransmul_FINAL -) - -errors=() -RED='\033[0;31m' -NC='\033[0m' # No Color - -basedir=$(pwd) - -# LANKA -if [ $2 -eq 1 ]; then - export SUITESPARSE_PATH=/data/scratch/changwan/florida_all - export FROSTT_PATH=/data/scratch/owhsu/datasets/frostt - export TACO_TENSOR_PATH=/data/scratch/owhsu/datasets - export SUITESPARSE_FORMATTED_PATH=/data/scratch/owhsu/datasets/suitesparse-formatted - export FROSTT_FORMATTED_TACO_PATH=/data/scratch/owhsu/datasets/frostt-formatted/taco-tensor - export FROSTT_FORMATTED_PATH=/data/scratch/owhsu/datasets/frostt-formatted - export SAM_HOME=$basedir - export TILED_SUITESPARSE_FORMATTED_PATH=${SAM_HOME}/tiles/matmul_ikj/formatted - export TILED_OUTPUT_PATH=${SAM_HOME}/tiles/matmul_ikj/output/ - - mkdir -p $TACO_TENSOR_PATH - mkdir -p $SUITESPARSE_FORMATTED_PATH - mkdir -p $FROSTT_FORMATTED_TACO_PATH - mkdir -p $FROSTT_FORMATTED_PATH - - lanka=ON - neva=OFF -elif [ $2 -eq 2 ]; then - export SUITESPARSE_PATH=/nobackup/owhsu/sparse-datasets/suitesparse/ - export FROSTT_PATH=/nobackup/owhsu/sparse-datasets/frostt/ - export SUITESPARSE_FORMATTED_PATH=/nobackup/owhsu/sparse-datasets/suitesparse-formatted - export FROSTT_FORMATTED_TACO_PATH=/nobackup/owhsu/sparse-datasets/frostt-formatted/taco-tensor - export FROSTT_FORMATTED_PATH=/nobackup/owhsu/sparse-datasets/frostt-formatted - export TACO_TENSOR_PATH=/nobackup/owhsu/sparse-datasets - export SAM_HOME=$basedir - export TILED_SUITESPARSE_FORMATTED_PATH=${SAM_HOME}/tiles/matmul_ikj/formatted - export TILED_OUTPUT_PATH=${SAM_HOME}/tiles/matmul_ikj/output/ - lanka=OFF - neva=ON -else - lanka=OFF - neva=OFF - export SAM_HOME=$basedir - export TILED_SUITESPARSE_FORMATTED_PATH=${SAM_HOME}/tiles/matmul_ikj/formatted - export TILED_OUTPUT_PATH=${SAM_HOME}/tiles/matmul_ikj/output/ -fi - -sspath=$SUITESPARSE_PATH -benchout=suitesparse-bench_simulator/sam -format_outdir=${SUITESPARSE_FORMATTED_PATH} - -source $basedir/../venv/bin/activate - -#__conda_setup="$('/data/scratch/owhsu/miniconda/bin/conda' 'shell.bash' 'hook' 2> /dev/null)" -#if [ $? -eq 0 ]; then -# eval "$__conda_setup" -#else -# if [ -f "/data/scratch/owhsu/miniconda/etc/profile.d/conda.sh" ]; then -# . "/data/scratch/owhsu/miniconda/etc/profile.d/conda.sh" -# else -# export PATH="/data/scratch/owhsu/miniconda/bin:$PATH" -# fi -#fi -#unset __conda_setup -#conda activate aha - -mkdir -p "$benchout" -mkdir -p $format_outdir -mkdir -p $TACO_TENSOR_PATH/other-formatted-taco - -for b in ${!BENCHMARKS[@]}; do - bench=${BENCHMARKS[$b]} - path=$basedir/$benchout/$bench - mkdir -p $basedir/$benchout/$bench - echo "Testing $bench..." 
- - while read line; do - cd $format_outdir - - if [ $2 -eq 1 ]; then - matrix="$sspath/$line/$line.mtx" - elif [ $2 -eq 2 ]; then - matrix="$sspath/$line.mtx" - else - matrix="$sspath/$line.mtx" - fi - - if [ "$bench" == "matmul_ikj" ]; then - echo "Generating input format files for $line..." - SUITESPARSE_TENSOR_PATH=$matrix python $basedir/scripts/datastructure_suitesparse.py -n $line - - SUITESPARSE_TENSOR_PATH=$matrix $basedir/compiler/taco/build/bin/taco-test sam.pack_other_ss - python $basedir/scripts/datastructure_frostt.py -n $line -f ss01 --other -ss - fi - - cd $basedir/sam/sim - #python -m cProfile -o test/final-apps/test_$bench.py --ssname $line -s --benchmark-json=$path/$line.json - pytest test/advanced-simulator/test_$bench.py --ssname $line -s --report-stats --check-gold --skip-empty --nbuffer --yaml_name=$3 --benchmark-json=$path/$line.json - # pytest test/advanced-simulator/test_$bench.py --ssname $line -s --report-stats --back --depth=1 --debug-sim --check-gold --benchmark-json=$path/$line.json - # python $basedir/scripts/converter.py --json_name $path/$line.json - - status=$? - if [ $status -gt 0 ] - then - errors+=("${line}, ${bench}") - fi - - cd $basedir - done <$1 - - python3 $basedir/scripts/bench_csv_aggregator.py $path $basedir/$benchout/suitesparse_$bench.csv - - echo -e "${RED}Failed tests:" - for i in ${!errors[@]}; do - error=${errors[$i]} - echo -e "${RED}$error," - done - echo -e "${NC}" -done diff --git a/scripts/artifact/README.md b/scripts/artifact/README.md new file mode 100644 index 00000000..58c200c6 --- /dev/null +++ b/scripts/artifact/README.md @@ -0,0 +1,10 @@ +# Artifact Scripts + +The `scripts/artifact/` folder contains scripts used only for artifact evaluation of "The Sparse Abstract Machine" ASPLOS 2023. + +1. `artifact_docker_copy.py` - Python script that copies figures from Docker to the local machine for viewing. +2. `collect_node_counts.py` - Python script that counts and prints the number of each SAM primitive for Table 1 in "The Sparse Abstract Machine". +3. `plot_memory_model.py` - Python script that plots the memory model graph figure. +4. `plot_stream_overhead.py` - Python script that plots the stream overhead figure. +5. `stream_overhead.sh` - Script that runs the stream overhead experiments. + diff --git a/scripts/artifact_docker_copy.py b/scripts/artifact/artifact_docker_copy.py similarity index 100% rename from scripts/artifact_docker_copy.py rename to scripts/artifact/artifact_docker_copy.py diff --git a/scripts/collect_node_counts.py b/scripts/artifact/collect_node_counts.py similarity index 100% rename from scripts/collect_node_counts.py rename to scripts/artifact/collect_node_counts.py diff --git a/scripts/plot_memory_model.py b/scripts/artifact/plot_memory_model.py similarity index 100% rename from scripts/plot_memory_model.py rename to scripts/artifact/plot_memory_model.py diff --git a/scripts/plot_stream_overhead.py b/scripts/artifact/plot_stream_overhead.py similarity index 100% rename from scripts/plot_stream_overhead.py rename to scripts/artifact/plot_stream_overhead.py diff --git a/scripts/stream_overhead.sh b/scripts/artifact/stream_overhead.sh similarity index 100% rename from scripts/stream_overhead.sh rename to scripts/artifact/stream_overhead.sh diff --git a/scripts/checker_diff.sh b/scripts/checker_diff.sh deleted file mode 100755 index b84cdeba..00000000 --- a/scripts/checker_diff.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/bin/bash - -for numpy in $1/*-numpy.tns; do - taco=${numpy/-numpy/-taco} - if [ !
"$(wc -l < $numpy | xargs)" -eq "$(wc -l < $taco | xargs)" ]; then - echo "Files $numpy and $taco have a differing number of entries." - fi -done diff --git a/scripts/datastructure_tns.py b/scripts/datastructure_tns_old.py similarity index 90% rename from scripts/datastructure_tns.py rename to scripts/datastructure_tns_old.py index ccf60fae..863f1d0b 100644 --- a/scripts/datastructure_tns.py +++ b/scripts/datastructure_tns_old.py @@ -33,7 +33,8 @@ if taco_format_dirname is None: print("Please set the TACO_TENSOR_PATH environment variable") exit() - taco_format_dirname = os.path.join(taco_format_dirname, "other-formatted-taco") + taco_format_dirname = os.path.join(taco_format_dirname, "other") + # taco_format_dirname = os.path.join(taco_format_dirname, "other-formatted-taco") else: outdir_name = os.getenv('FROSTT_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats')) taco_format_dirname = os.getenv('FROSTT_FORMATTED_TACO_PATH') @@ -44,24 +45,34 @@ out_path = Path(outdir_name) out_path.mkdir(parents=True, exist_ok=True) +print("args.name is ", args.name) + if args.name is None: print("Please enter a tensor name") exit() +print("\nhere after Please enter tensor name\n") if args.format is not None: assert args.format in formats levels = args.format[:-3] if args.other: assert args.bench is not None + + print("here to get other file names\n") otherfileNames = [f for f in os.listdir(taco_format_dirname) if os.path.isfile(os.path.join(taco_format_dirname, f)) and args.name in f] + print("have otherfileNames\n") + print(os.listdir(outdir_name)) + print("length of otherfilenames is: ", len(otherfileNames), "\n") + for otherfile in otherfileNames: + print("iterate thru otherfileNames\n") taco_format_orig_filename = os.path.join(taco_format_dirname, otherfile) - outdir_other_name = os.path.join(outdir_name, args.name, args.bench) - # outdir_other_name = os.path.join(outdir_name, args.name, 'other', otherfile[:-4]) + # outdir_other_name = os.path.join(outdir_name, args.name, args.bench) + outdir_other_name = os.path.join(outdir_name, args.name, 'other', otherfile[:-4]) outdir_orig_path = Path(outdir_other_name) outdir_orig_path.mkdir(parents=True, exist_ok=True) @@ -94,6 +105,7 @@ parse_taco_format(taco_format_orig_filename, outdir_other_name, name, args.format, hw_filename=args.hw) else: + print("in else statement\n") taco_format_orig_filename = os.path.join(taco_format_dirname, args.name + "_" + levels + '.txt') taco_format_shift_filename = os.path.join(taco_format_dirname, args.name + '_shift_' + levels + '.txt') @@ -102,6 +114,8 @@ outdir_orig_path = Path(outdir_orig_name) outdir_orig_path.mkdir(parents=True, exist_ok=True) + print("parse taco format\n") + parse_taco_format(taco_format_orig_filename, outdir_orig_name, 'B', args.format, hw_filename=args.hw) # Shifted diff --git a/scripts/ext_runner.sh b/scripts/ext_runner.sh deleted file mode 100755 index 5ff01444..00000000 --- a/scripts/ext_runner.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash -#SBATCH -N 1 -#SBATCH --mem 120000 -#SBATCH -p lanka-v3 -#SBATCH --exclusive - -rm -rf $basedir/tiles/* - -./scripts/tile_ext.sh $1 memory_config_extensor_17M_llb.yaml - -python scripts/generate_gold_matmul_tiled.py --yaml_name memory_config_extensor_17M_llb.yaml - -./scripts/advanced_simulator_runner.sh scripts/temp.txt 2 memory_config_extensor_17M_llb.yaml diff --git a/scripts/formatting/README.md b/scripts/formatting/README.md new file mode 100644 index 00000000..a78f55dc --- /dev/null +++ b/scripts/formatting/README.md @@ -0,0 +1,15 @@ +# Formatting 
+ +The `scripts/formatting/` folder contains scripts used to format the datasets from mtx/tns files to the seg/crd/vals arrays for CSF. The CSF files are expected by both the Onyx AHA flow and the SAM simulator. + +1. `datastructure_suitesparse.py` - Python script used by + `generate_suitesparse_formats.sh` to format from mtx to CSF files. +2. `datastructure_tns.py` - Python script used by + `generate_frostt_formats.sh` to format from tns to CSF files. +3. `download_unpack_format_suitesparse.sh` - Script that downloads, unpacks, + and formats a list of SuiteSparse matrices. To download and unpack, it + calls scripts in `scripts/get_data`. +4. `generate_frostt_formats.sh` - Formats all FROSTT datasets. FIXME: This file needs fixing as it uses the old CSF formatting (e.g. `matrix_name/B_seg0`) instead of the new one (e.g. `app/tensor_B_mode_0_seg`) +5. `generate_suitesparse_formats.sh` - Formats all SuiteSparse datasets. + +Formatted CSF files should reside in `$SUITESPARSE_FORMATTED_PATH` for SuiteSparse matrices. diff --git a/scripts/formatting/datastructure_suitesparse.py b/scripts/formatting/datastructure_suitesparse.py new file mode 100644 index 00000000..d2e586c2 --- /dev/null +++ b/scripts/formatting/datastructure_suitesparse.py @@ -0,0 +1,291 @@ +import argparse +import os +import shutil +import scipy.sparse +import numpy as np + +from pathlib import Path + +import sys +# Make the repository's scripts/ directory importable so util.util can be found +sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')) + +from util.util import FormatWriter, SuiteSparseTensor, InputCacheSuiteSparse +from sam.util import SUITESPARSE_FORMATTED_PATH, ScipyTensorShifter + +all_formats = ["coo", "cooT", "csr", "dcsr", "dcsc", "csc", "dense", "denseT"] +formats = ["coo", "cooT", "csr", "dcsr", "dcsc", "csc", "dense"] +scipy_formats = ["coo", "csr", "csc"] + + +def write_datastructure_tiles(args, tensor, out_path, tile_name): + print("Writing " + args.name + " for test " + args.benchname + "...") + + dirname = args.output_dir_path if args.output_dir_path is not None else os.path.join(out_path, args.name, args.benchname) + dirname = os.path.join(dirname, tile_name) + dirpath = Path(dirname) + if os.path.exists(dirpath): + shutil.rmtree(dirpath) + dirpath.mkdir(parents=True, exist_ok=True, mode=0o777) + + print(tile_name) + tensorname = tile_name.split("_")[1] + + coo = inputCache.load(tensor, False) + formatWriter.writeout_separate_sparse_only(coo, dirname, tensorname, format_str="ss01", hw=False) + # formatWriter.writeout_separate_sparse_only(coo, dirname, tensorname, format_str="ss01", hw=args.hw) + + + +def write_datastructure_bench(args, tensor, out_path, tiles=None): + shifter = ScipyTensorShifter() + + print("Writing " + args.name + " for test " + args.benchname + "...") + + dirname = args.output_dir_path if args.output_dir_path is not None else os.path.join(out_path, args.name, args.benchname) + print("dirname: " + dirname) + if tiles is not None: + dirname = os.path.join(dirname, tiles) + dirpath = Path(dirname) + if os.path.exists(dirpath): + shutil.rmtree(dirpath) + dirpath.mkdir(parents=True, exist_ok=True, mode=0o777) + + if "mat_mattransmul" in args.benchname or "mat_residual" in args.benchname: + tensorname = "C" + else: + tensorname = "B" + + coo = inputCache.load(tensor, False) + shape = coo.shape + + # These benchmarks need format_str == "ss10" + if args.benchname not in ["matmul_kij", "matmul_kji", "matmul_jki", "mat_vecmul", "mat_vecmul_ji", "mat_mattransmul"]: + formatWriter.writeout_separate_sparse_only(coo,
dirname, tensorname, format_str="ss01") + + if "matmul_ijk" in args.benchname: + shifted = shifter.shiftLastMode(coo) + + print("Writing " + args.name + " shifted and transposed...") + tensorname = "C" + trans_shifted = shifted.transpose() + formatWriter.writeout_separate_sparse_only(trans_shifted, dirname, tensorname, format_str="ss10") + + elif "matmul_jik" in args.benchname: + shifted = shifter.shiftLastMode(coo) + + print("Writing " + args.name + " shifted and transposed...") + tensorname = "C" + trans_shifted = shifted.transpose() + formatWriter.writeout_separate_sparse_only(trans_shifted, dirname, tensorname, format_str="ss10") + elif "matmul_ikj" in args.benchname: + shifted = shifter.shiftLastMode(coo) + + print("Writing " + args.name + " shifted and transposed...") + tensorname = "C" + trans_shifted = shifted.transpose() + formatWriter.writeout_separate_sparse_only(trans_shifted, dirname, tensorname, format_str="ss01") + + elif "matmul_jki" in args.benchname: + formatWriter.writeout_separate_sparse_only(coo, dirname, tensorname, format_str="ss10") + + shifted = shifter.shiftLastMode(coo) + + print("Writing " + args.name + " shifted and transposed...") + tensorname = "C" + trans_shifted = shifted.transpose() + formatWriter.writeout_separate_sparse_only(trans_shifted, dirname, tensorname, format_str="ss10") + + elif "matmul_kij" in args.benchname: + formatWriter.writeout_separate_sparse_only(coo, dirname, tensorname, format_str="ss10") + + shifted = shifter.shiftLastMode(coo) + + print("Writing " + args.name + " shifted and transposed...") + tensorname = "C" + trans_shifted = shifted.transpose() + formatWriter.writeout_separate_sparse_only(trans_shifted, dirname, tensorname, format_str="ss01") + + elif "matmul_kji" in args.benchname: + formatWriter.writeout_separate_sparse_only(coo, dirname, tensorname, format_str="ss10") + + shifted = shifter.shiftLastMode(coo) + + print("Writing " + args.name + " shifted and transposed...") + tensorname = "C" + trans_shifted = shifted.transpose() + formatWriter.writeout_separate_sparse_only(trans_shifted, dirname, tensorname, format_str="ss01") + + elif "mat_elemadd3" in args.benchname: + print("Writing " + args.name + " shifted...") + tensorname = "C" + shifted = shifter.shiftLastMode(coo) + formatWriter.writeout_separate_sparse_only(shifted, dirname, tensorname, format_str="ss01") + + print("Writing " + args.name + " shifted2...") + tensorname = "D" + shifted2 = shifter.shiftLastMode(shifted) + formatWriter.writeout_separate_sparse_only(shifted2, dirname, tensorname, format_str="ss01") + + elif "mat_elemadd" in args.benchname or "mat_elemmul" in args.benchname: + print("Writing " + args.name + " shifted...") + tensorname = "C" + shifted = shifter.shiftLastMode(coo) + formatWriter.writeout_separate_sparse_only(shifted, dirname, tensorname, format_str="ss01") + + elif "mat_mattransmul" in args.benchname: + formatWriter.writeout_separate_sparse_only(coo, dirname, tensorname, format_str="ss10") + # if not args.no_gen_other: + if False: + tensorname = 'd' + vec = scipy.sparse.random(shape[0], 1, density=args.density, data_rvs=np.ones) + vec = vec.toarray().flatten() + formatWriter.writeout_separate_vec(vec, dirname, tensorname) + + tensorname = 'f' + vec = scipy.sparse.random(shape[1], 1, density=args.density, data_rvs=np.ones) + vec = vec.toarray().flatten() + formatWriter.writeout_separate_vec(vec, dirname, tensorname) + elif "mat_vecmul" == args.benchname or "mat_vecmul_ji" in args.benchname: + formatWriter.writeout_separate_sparse_only(coo, 
dirname, tensorname, format_str="ss01")
+        if not args.no_gen_other:
+            tensorname = 'c'
+            vec = scipy.sparse.random(shape[1], 1, density=args.density, data_rvs=np.ones)
+            vec = vec.toarray().flatten()
+            formatWriter.writeout_separate_vec(vec, dirname, tensorname)
+    elif "mat_vecmul_ij" in args.benchname:
+        pass
+    elif "mat_sddmm" in args.benchname:
+        pass
+    elif "mat_residual" in args.benchname:
+        if not args.no_gen_other:
+            tensorname = 'b'
+            vec = scipy.sparse.random(shape[0], 1, density=args.density, data_rvs=np.ones)
+            vec = vec.toarray().flatten()
+            formatWriter.writeout_separate_vec(vec, dirname, tensorname)
+
+            tensorname = 'd'
+            vec = scipy.sparse.random(shape[1], 1, density=args.density, data_rvs=np.ones)
+            vec = vec.toarray().flatten()
+            formatWriter.writeout_separate_vec(vec, dirname, tensorname)
+    elif "mat_identity" in args.benchname:
+        pass
+    else:
+        raise NotImplementedError
+
+
+parser = argparse.ArgumentParser(description="Process some SuiteSparse matrices into per-level datastructures")
+parser.add_argument('-n', '--name', metavar='ssname', type=str, action='store',
+                    help='tensor name to run format conversion on one SS tensor')
+parser.add_argument('-f', '--format', metavar='ssformat', type=str, action='store',
+                    help='The format that the tensor should be converted to')
+parser.add_argument('-comb', '--combined', action='store_true', default=False,
+                    help='Write the formatted datastructures out as combined whole-tensor files instead of separate per-level files')
+parser.add_argument('-o', '--omit-dense', action='store_true', default=False, help='Do not create fully dense format')
+parser.add_argument('-cast', '--cast', action='store_true', default=False, help='Safe sparsity cast to int for values')
+parser.add_argument('-hw', '--hw', action='store_true', default=False,
+                    help='Only generate formats used for hardware testing (all sparse levels, concordant)')
+parser.add_argument('-b', '--benchname', type=str, default=None, help='test name to run format conversion on')
+parser.add_argument('--input_path', type=str, default=None)
+parser.add_argument('--output_dir_path', type=str, default=None)
+parser.add_argument('--tiles', action='store_true')
+parser.add_argument('--no_gen_other', action='store_true', help="Do not generate the random 'other' tensors")
+parser.add_argument('--seed', type=int, default=0, help='Random seed needed for gen_other')
+parser.add_argument('--density', type=float, default=0.25, help='If gen_other, used for density of "other" tensor')
+args = parser.parse_args()
+
+np.random.seed(args.seed)
+
+inputCache = InputCacheSuiteSparse()
+formatWriter = FormatWriter(args.cast)
+
+cwd = os.getcwd()
+if args.output_dir_path is None:
+    out_dirname = SUITESPARSE_FORMATTED_PATH
+else:
+    out_dirname = args.output_dir_path
+
+out_path = Path(out_dirname)
+out_path.mkdir(parents=True, exist_ok=True, mode=0o777)
+
+if args.name is None:
+    print("Please enter a matrix name")
+    exit()
+
+if args.input_path is None:
+    SS_PATH = os.getenv('SUITESPARSE_TENSOR_PATH', default=os.path.join(cwd, 'suitesparse'))
+else:
+    SS_PATH = args.input_path
+
+tensor = None
+mtx_files = None
+if args.tiles:
+    # Get all mtx tile files from args.input_path
+    # mtx_files = [os.path.join(args.input_path, fname) for fname in os.listdir(args.input_path) if fname.endswith(".mtx")]
+    mtx_files = [os.path.join(args.input_path, fname) for fname in os.listdir(args.input_path)]
+
+    tensor = [SuiteSparseTensor(mtx_file) for mtx_file in mtx_files]
+elif args.input_path is not None:
+    tensor =
SuiteSparseTensor(args.input_path) +else: + print(SS_PATH) + tensor = SuiteSparseTensor(SS_PATH) + +if args.format is not None: + assert args.format in formats + filename = os.path.join(out_path, args.name + "_" + args.format + ".txt") + + coo = inputCache.load(tensor, False) + formatWriter.writeout(coo, args.format, filename) +elif args.combined: + for format_str in formats: + filename = os.path.join(out_path, args.name + "_" + format_str + ".txt") + print("Writing " + args.name + " " + format_str + "...") + + coo = inputCache.load(tensor, False) + formatWriter.writeout(coo, format_str, filename) + + shifted_filename = os.path.join(out_path, args.name + "_shifted_" + format_str + ".txt") + shifted = ScipyTensorShifter().shiftLastMode(coo) + formatWriter.writeout(shifted, format_str, shifted_filename) + + trans_filename = os.path.join(out_path, args.name + "_trans_shifted_" + format_str + ".txt") + trans_shifted = shifted.transpose() + formatWriter.writeout(trans_shifted, format_str, trans_filename) +elif args.hw: + if args.tiles and tensor is not None: + print("tensor lengths = ", len(tensor)) + for i, ten in enumerate(tensor): + tile_name = os.path.split(mtx_files[i])[1].split(".")[0] + write_datastructure_tiles(args, ten, out_path, tile_name) + else: + write_datastructure_bench(args, tensor, out_path) + +else: + print("Writing " + args.name + " original...") + dirname = os.path.join(out_path, args.name, "orig") + dirpath = Path(dirname) + dirpath.mkdir(parents=True, exist_ok=True, mode=0o777) + tensorname = "B" + coo = inputCache.load(tensor, False) + formatWriter.writeout_separate(coo, dirname, tensorname, omit_dense=args.omit_dense) + + print("Writing " + args.name + " shifted...") + dirname = os.path.join(out_path, args.name, "shift") + dirpath = Path(dirname) + dirpath.mkdir(parents=True, exist_ok=True, mode=0o777) + tensorname = "C" + shifted = ScipyTensorShifter().shiftLastMode(coo) + formatWriter.writeout_separate(shifted, dirname, tensorname, omit_dense=args.omit_dense) + + print("Writing " + args.name + " shifted and transposed...") + dirname = os.path.join(out_path, args.name, "shift-trans") + dirpath = Path(dirname) + dirpath.mkdir(parents=True, exist_ok=True, mode=0o777) + tensorname = "C" + trans_shifted = shifted.transpose() + formatWriter.writeout_separate(trans_shifted, dirname, tensorname, omit_dense=args.omit_dense) diff --git a/scripts/formatting/datastructure_tns.py b/scripts/formatting/datastructure_tns.py new file mode 100644 index 00000000..9e5d743e --- /dev/null +++ b/scripts/formatting/datastructure_tns.py @@ -0,0 +1,213 @@ +import argparse +import os +import shutil +import scipy.sparse +import numpy as np +import sys +import random +import shutil + +from pathlib import Path +from scripts.util.util import parse_taco_format + +from scripts.util.util import FormatWriter, SuiteSparseTensor, InputCacheSuiteSparse +# custom_path = '/nobackup/jadivara/sam/sam/util.py' +# sys.path.append(custom_path) +# from import SUITESPARSE_FORMATTED_PATH, ScipyTensorShifter + +cwd = os.getcwd() + +formats = ["sss012", "ss01", "dss", "dds", "ddd", "dsd", "sdd", "sds", "ssd"] + +parser = argparse.ArgumentParser(description="Process some Frostt tensors into per-level datastructures") +parser.add_argument('-n', '--name', metavar='fname', type=str, action='store', + help='tensor name to run format conversion on one frostt tensor') +parser.add_argument('-f', '--format', metavar='fformat', type=str, action='store', + help='The format that the tensor should be converted to') 
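
For reference, here is a sketch of how the formatter above is typically invoked and what the emitted per-level CSF arrays look like. The matrix, its nonzero pattern, and the `tensor_B_mode_vals` file name are illustrative assumptions; the `tensor_B_mode_*_seg`/`_crd` names follow the new naming scheme described in the README above.

```bash
# Format one SuiteSparse matrix for a benchmark (matrix name illustrative):
SUITESPARSE_TENSOR_PATH=$SUITESPARSE_PATH/bcsstm02 \
    python scripts/formatting/datastructure_suitesparse.py -n bcsstm02 -hw -b matmul_ijk

# For a 2x3 matrix with nonzeros (0,0)=1, (0,2)=2, (1,1)=3 formatted as "ss01"
# (both modes compressed, i.e. DCSR), the per-level arrays would hold:
#   tensor_B_mode_0_seg : 0 2      <- one segment spanning the 2 nonempty rows
#   tensor_B_mode_0_crd : 0 1      <- row coordinates of the nonempty rows
#   tensor_B_mode_1_seg : 0 2 3    <- start/end of each stored row's column list
#   tensor_B_mode_1_crd : 0 2 1    <- column coordinates
#   tensor_B_mode_vals  : 1 2 3    <- the nonzero values
```
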
+parser.add_argument('-i', '--int', action='store_false', default=True, help='Safe sparsity cast to int for values')
+parser.add_argument('-s', '--shift', action='store_false', default=True, help='Also format shifted tensor')
+parser.add_argument('-o', '--other', action='store_true', default=False, help='Format other tensor')
+parser.add_argument('-ss', '--suitesparse', action='store_true', default=False, help='Format suitesparse other tensor')
+parser.add_argument('-hw', '--hw', action='store_true', default=False,
+                    help='Format filenames as in AHA SCGRA _mode_')
+parser.add_argument('-np', '--numpy', action='store_true', default=False, help='Format numpy tensors')
+parser.add_argument('-b', '--bench', type=str, default=None, help='Name of benchmark')
+parser.add_argument('--density', type=float, default=0.25, help='If gen_other, used for density of "other" tensor')
+parser.add_argument('-cast', '--cast', action='store_true', default=False, help='Safe sparsity cast to int for values')
+
+args = parser.parse_args()
+if args.other:
+    if args.suitesparse:
+        outdir_name = os.getenv('SUITESPARSE_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats'))
+    else:
+        outdir_name = os.getenv('FROSTT_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats'))
+    taco_format_dirname = os.getenv('TACO_TENSOR_PATH')
+    if taco_format_dirname is None:
+        print("Please set the TACO_TENSOR_PATH environment variable")
+        exit()
+    taco_format_dirname = os.path.join(taco_format_dirname, "other-formatted-taco")
+else:
+    outdir_name = os.getenv('FROSTT_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats'))
+    taco_format_dirname = os.getenv('FROSTT_FORMATTED_TACO_PATH')
+    if taco_format_dirname is None:
+        print("Please set the FROSTT_FORMATTED_TACO_PATH environment variable")
+        exit()
+
+out_path = Path(outdir_name)
+out_path.mkdir(parents=True, exist_ok=True)
+
+formatWriter = FormatWriter(args.cast)
+
+if args.name is None:
+    print("Please enter a tensor name")
+    exit()
+
+if args.format is not None:
+    assert args.format in formats
+    levels = args.format[:-3]
+
+    # Clean up leftover randomly generated tensors. Note that os.path.exists()
+    # does not expand globs, so glob through pathlib instead.
+    for rand_dir in Path('sam/FROST_FORMATTED').glob('rand_tensor*'):
+        shutil.rmtree(rand_dir)
+
+    if args.bench != "tensor3_elemadd" and args.bench != "tensor3_innerprod":
+        assert args.bench is not None
+        outdir_other_name = os.path.join(outdir_name, args.name, args.bench)
+        # outdir_other_name = os.path.join(outdir_name, args.name, 'other', otherfile[:-4])
+        outdir_orig_path = Path(outdir_other_name)
+        outdir_orig_path.mkdir(parents=True, exist_ok=True)
+
+        name = None
+        taco_format_orig_filename = os.path.join(taco_format_dirname, args.name + "_" + levels + '.txt')
+
+        inputCache = InputCacheSuiteSparse()
+
+        if args.bench == "tensor3_ttv":
+            outdir_orig_name = os.path.join(outdir_name, args.name, args.bench, args.format)
+            outdir_orig_path = Path(outdir_orig_name)
+            outdir_orig_path.mkdir(parents=True, exist_ok=True)
+
+            taco_format_orig_filename = "/home/avb03/sam/FROST_FORMATTED_TACO/" + args.name + "_" + levels + '.txt'
+            parse_taco_format(taco_format_orig_filename, outdir_orig_name, 'B', args.format)
+            #Need this line?
formatWriter.writeout_separate_sparse_only(coo, dirname, tensorname, format_str="ss10") + file_path_name = os.path.join(outdir_orig_name, "tensor_B_mode_shape") + file1 = open(file_path_name, 'r') + shape = [0]*3 + lines = file1.readlines() + count = 0 + + # Strips the newline character + for line in lines: + shape[count] = int(line) + count += 1 + # coo = inputCache.load(tensor, False) + + # formatWriter.writeout_separate_sparse_only(coo, dirname, tensorname, format_str="ss10") + tensorname = 'c' + vec = scipy.sparse.random(shape[2], 1, density=args.density, data_rvs=np.ones) + vec = vec.toarray().flatten() + tensor_out_path = os.path.join(out_path, args.name, args.bench, args.format) + formatWriter.writeout_separate_vec(vec, tensor_out_path, tensorname) + + # vec = scipy.sparse.random(shape[2], 1, data_rvs=np.ones) + # vec = vec.toarray().flatten() + # formatWriter.writeout_separate_vec(vec, out_path, tensorname) + #FormatWriter.writeout_separate_vec(vec, out_path, tensorname, tensorname) + #formatWriter.writeout_separate_sparse_only() + elif args.bench == "tensor3_ttm": + outdir_orig_name = os.path.join(outdir_name, args.name, args.bench, args.format) + outdir_orig_path = Path(outdir_orig_name) + outdir_orig_path.mkdir(parents=True, exist_ok=True) + + taco_format_orig_filename = "/home/avb03/sam/FROST_FORMATTED_TACO/" + args.name + "_" + levels + '.txt' + parse_taco_format(taco_format_orig_filename, outdir_orig_name, 'B', args.format) + #Need this line? formatWriter.writeout_separate_sparse_only(coo, dirname, tensorname, format_str="ss10") + file_path_name = os.path.join(outdir_orig_name, "tensor_B_mode_shape") + file1 = open(file_path_name, 'r') + shape = [0]*3 + lines = file1.readlines() + count = 0 + + # Strips the newline character + for line in lines: + shape[count] = int(line) + count += 1 + # coo = inputCache.load(tensor, False) + dimension_k = random.randint(min(shape), 10) + dimension_l = shape[2] + dimension_j = shape[1] + + # formatWriter.writeout_separate_sparse_only(coo, dirname, tensorname, format_str="ss10") + tensorname = 'C' + matrix = scipy.sparse.random(dimension_k, dimension_l, density=args.density, data_rvs=np.ones).toarray() + tensor_out_path = os.path.join(out_path, args.name, args.bench, args.format) + formatWriter.writeout_separate_sparse_only(matrix, tensor_out_path, tensorname) + + # vec = scipy.sparse.random(shape[2], 1, data_rvs=np.ones) + # vec = vec.toarray().flatten() + # formatWriter.writeout_separate_vec(vec, out_path, tensorname) + #FormatWriter.writeout_separate_vec(vec, out_path, tensorname, tensorname) + #formatWriter.writeout_separate_sparse_only() + elif args.bench == "tensor3_mttkrp": + outdir_orig_name = os.path.join(outdir_name, args.name, args.bench, args.format) + outdir_orig_path = Path(outdir_orig_name) + outdir_orig_path.mkdir(parents=True, exist_ok=True) + + taco_format_orig_filename = "/home/avb03/sam/FROST_FORMATTED_TACO/" + args.name + "_" + levels + '.txt' + parse_taco_format(taco_format_orig_filename, outdir_orig_name, 'B', args.format) + + file_path_name = os.path.join(outdir_orig_name, "tensor_B_mode_shape") + file1 = open(file_path_name, 'r') + shape = [0]*3 + lines = file1.readlines() + count = 0 + + # Strips the newline character + for line in lines: + shape[count] = int(line) + count += 1 + + dimension_i = shape[0] + dimension_k = shape[1] + dimension_l = shape[2] + dimension_j = random.randint(min(shape), 10) + + # formatWriter.writeout_separate_sparse_only(coo, dirname, tensorname, format_str="ss10") + tensorname = 'C' + 
matrix = scipy.sparse.random(dimension_j, dimension_k, density=args.density, data_rvs=np.ones).toarray() + tensor_out_path = os.path.join(out_path, args.name, args.bench, args.format) + formatWriter.writeout_separate_sparse_only(matrix, tensor_out_path, tensorname) + + tensorname = 'D' + matrix = scipy.sparse.random(dimension_j, dimension_l, density=args.density, data_rvs=np.ones).toarray() + tensor_out_path = os.path.join(out_path, args.name, args.bench, args.format) + formatWriter.writeout_separate_sparse_only(matrix, tensor_out_path, tensorname) + else: + raise NotImplementedError + + assert tensorname is not None, "Other tensor name was not set properly and is None" + # parse_taco_format(taco_format_orig_filename, outdir_other_name, tensorname, args.format, hw_filename=args.hw) + + else: + #this code is used for: tensor3_elemadd, tensor3_innerprod + taco_format_orig_filename = os.path.join(taco_format_dirname, args.name + "_" + levels + '.txt') + taco_format_shift_filename = os.path.join(taco_format_dirname, args.name + '_shift_' + levels + '.txt') + + # Original + outdir_orig_name = os.path.join(outdir_name, args.name, args.bench, args.format) + outdir_orig_path = Path(outdir_orig_name) + outdir_orig_path.mkdir(parents=True, exist_ok=True) + + taco_format_orig_filename = "/home/avb03/sam/FROST_FORMATTED_TACO/" + args.name + "_" + levels + '.txt' + parse_taco_format(taco_format_orig_filename, outdir_orig_name, 'B', args.format) + + # Shifted + if args.shift: + outdir_shift_name = os.path.join(outdir_name, args.name, args.bench, args.format) + outdir_shift_path = Path(outdir_shift_name) + outdir_shift_path.mkdir(parents=True, exist_ok=True) + + taco_format_shift_filename = "/home/avb03/sam/FROST_FORMATTED_TACO/" + args.name + "_shift_" + levels + '.txt' + parse_taco_format(taco_format_shift_filename, outdir_shift_name, 'C', args.format) diff --git a/scripts/download_unpack_format_suitesparse.sh b/scripts/formatting/download_unpack_format_suitesparse.sh similarity index 71% rename from scripts/download_unpack_format_suitesparse.sh rename to scripts/formatting/download_unpack_format_suitesparse.sh index 5250d256..5dc4b09c 100755 --- a/scripts/download_unpack_format_suitesparse.sh +++ b/scripts/formatting/download_unpack_format_suitesparse.sh @@ -1,8 +1,10 @@ #!/bin/bash +# Command: ./scripts/formatting/download_unpack_format_suitesparse.sh + basedir=$(pwd) path=$basedir/jsons -download_script=scripts/download_suitesparse_stream_overhead.sh +download_script=scripts/get_data/download_suitesparse_partial.sh mkdir -p $path @@ -30,7 +32,7 @@ BENCHMARKS=( echo "mkdir -p ${SUITESPARSE_PATH}" >> $download_script echo "pushd ." >> $download_script echo "cd ${SUITESPARSE_PATH}" >> $download_script -grep -F -f $1 scripts/download_suitesparse.sh >> $download_script +grep -F -f $1 scripts/get_data/download_suitesparse.sh >> $download_script echo "popd" >> $download_script # Make it an executable @@ -40,13 +42,13 @@ chmod ugo+x $download_script ./$download_script # Unpack the downloaded suitesparse files since they come in .tar format -./scripts/unpack_suitesparse.sh $(realpath $1) +./scripts/get_data/unpack_suitesparse.sh $(realpath $1) for b in ${!BENCHMARKS[@]}; do bench=${BENCHMARKS[$b]} while read line; do echo "Generating input format files for $line..." 
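
A typical invocation of this wrapper, under the assumption that the environment variables are already set and that the argument is a textfile listing one SuiteSparse matrix name per line (file name illustrative):

```bash
# Run from the top of the sam/ repository.
export SUITESPARSE_PATH=/data/suitesparse                      # illustrative path
export SUITESPARSE_FORMATTED_PATH=/data/suitesparse-formatted  # illustrative path

printf 'bcsstm02\ncage4\n' > matrix_names.txt   # one matrix name per line
./scripts/formatting/download_unpack_format_suitesparse.sh matrix_names.txt
```
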
sspath=${SUITESPARSE_PATH}/$line - SUITESPARSE_TENSOR_PATH=$sspath python $basedir/scripts/datastructure_suitesparse.py -n $line -hw -b $bench + SUITESPARSE_TENSOR_PATH=$sspath python $basedir/scripts/formatting/datastructure_suitesparse.py -n $line -hw -b $bench done <$(realpath $1) done diff --git a/scripts/generate_frostt_formats.sh b/scripts/formatting/generate_frostt_formats.sh similarity index 53% rename from scripts/generate_frostt_formats.sh rename to scripts/formatting/generate_frostt_formats.sh index 6417bbef..26310bf8 100755 --- a/scripts/generate_frostt_formats.sh +++ b/scripts/formatting/generate_frostt_formats.sh @@ -2,6 +2,8 @@ #SBATCH -N 1 #SBATCH -t 360 +# Command: ./scripts/formatting/generate_frostt_formats.sh + DATASET_NAMES=( fb1k fb10k @@ -20,12 +22,6 @@ FORMATS=( sss012 ) -#export SUITESPARSE_PATH=/nobackup/owhsu/sparse-datasets/suitesparse/ -#export FROSTT_PATH=/nobackup/owhsu/sparse-datasets/frostt/ -#export SUITESPARSE_FORMATTED_PATH=/nobackup/owhsu/sparse-datasets/suitesparse-formatted -#export FROSTT_FORMATTED_TACO_PATH=/nobackup/owhsu/sparse-datasets/frostt-formatted/taco-tensor -#export FROSTT_FORMATTED_PATH=/nobackup/owhsu/sparse-datasets/frostt-formatted - basedir=$(pwd) for i in ${!FORMATS[@]}; do @@ -39,8 +35,8 @@ for i in ${!FORMATS[@]}; do name=${DATASET_NAMES[$j]} echo "Generating input format files for $name..." - python $basedir/scripts/datastructure_tns.py -n $name -f $format - python $basedir/scripts/datastructure_tns.py -n $name -f $format --other + python $basedir/scripts/formatting/datastructure_tns.py -n $name -f $format + python $basedir/scripts/formatting/datastructure_tns.py -n $name -f $format --other chmod -R 775 $FROSTT_FORMATTED_PATH done done diff --git a/scripts/generate_suitesparse_formats.sh b/scripts/formatting/generate_suitesparse_formats.sh similarity index 61% rename from scripts/generate_suitesparse_formats.sh rename to scripts/formatting/generate_suitesparse_formats.sh index a12d9eea..dedd9308 100755 --- a/scripts/generate_suitesparse_formats.sh +++ b/scripts/formatting/generate_suitesparse_formats.sh @@ -2,16 +2,20 @@ #SBATCH -N 1 #SBATCH -t 360 +# Command: ./scripts/formatting/generate_suitesparse_formats.sh + BENCHMARKS=( - matmul_ikj - matmul_ijk - matmul_kij - mat_elemmul - mat_elemadd - mat_elemadd3 +# matmul_ikj + matmul_ijk +# matmul_kij + mat_elemmul + mat_elemadd + mat_elemadd3 mat_residual mat_mattransmul - mat_identity + mat_vecmul +# mat_identity + mat_sddmm ) # This is a list of benchmarks that have "other" tensors that are generated @@ -32,10 +36,10 @@ for b in ${!BENCHMARKS[@]}; do sspath=${SUITESPARSE_PATH}/$name echo "Generating input format files for $name..." 
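
For FROSTT, `generate_frostt_formats.sh` boils down to per-dataset calls like the ones below when run by hand (tensor name and paths illustrative; the FIXME about the old naming scheme still applies):

```bash
export FROSTT_FORMATTED_PATH=/data/frostt-formatted               # illustrative path
export FROSTT_FORMATTED_TACO_PATH=/data/frostt-formatted/taco-tensor
export TACO_TENSOR_PATH=/data/taco-tensors                        # needed for --other

python3 scripts/formatting/datastructure_tns.py -n fb1k -f sss012
python3 scripts/formatting/datastructure_tns.py -n fb1k -f sss012 --other
```
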
- SUITESPARSE_TENSOR_PATH=$sspath python $basedir/scripts/datastructure_suitesparse.py -n $name -hw -b $bench + SUITESPARSE_TENSOR_PATH=$sspath python3 $basedir/scripts/formatting/datastructure_suitesparse.py -n $name -hw -b $bench if [[ $OTHERBENCHES =~ "$bench" ]]; then echo "Generating format of 'other' tensor" - python $basedir/scripts/datastructure_tns.py -n $line -f ss01 --other -ss -b $bench -hw + python3 $basedir/scripts/formatting/datastructure_tns.py -n $line -f ss01 --other -ss -b $bench -hw fi done <$textfile diff --git a/scripts/gen_sam_apps/README.md b/scripts/gen_sam_apps/README.md new file mode 100644 index 00000000..f644184d --- /dev/null +++ b/scripts/gen_sam_apps/README.md @@ -0,0 +1,6 @@ +# Generate SAM Simulator Applications + +The `scripts/gen_apps/` folder contains scripts used to generate the pytest applications (in Python) needed to run the SAM simulations + +1. `test_generating_code.py` - Python script that takes in a SAM graph and generates the pytest application in the `sam/sam/sim/test/apps/` directory. + diff --git a/scripts/test_generating_code.py b/scripts/gen_sam_apps/test_generating_code.py similarity index 97% rename from scripts/test_generating_code.py rename to scripts/gen_sam_apps/test_generating_code.py index d92b39b9..17bfa91b 100755 --- a/scripts/test_generating_code.py +++ b/scripts/gen_sam_apps/test_generating_code.py @@ -151,8 +151,11 @@ def generate_header(f, out_name): f.write("from sam.sim.src.token import *\n") f.write("from sam.sim.test.test import *\n") f.write("from sam.sim.test.gold import *\n") + f.write("from sam.sim.test.gen_gantt import gen_gantt\n") + f.write("\n") f.write("import os\n") f.write("import csv\n") + f.write("\n") f.write("cwd = os.getcwd()\n") if out_name in suitesparse_list: f.write("formatted_dir = os.getenv('SUITESPARSE_FORMATTED_PATH', default=os.path.join(cwd, 'mode-formats'))\n") @@ -195,6 +198,19 @@ def get_common_test_name(test_name): return test_name +def get_out_crd_str(d, u_, index_value): + # By default, the input primitive connected to a crddrop will be a level scanner + out_crd_str = "out_crd" + # However, if the input primitive is another crddrop, we need to make sure it's reading from + # the correct input crddrop output. 
+ if d[u_]["type"] == "crddrop": + if index_value == d[u_]["inner"]: + out_crd_str += "_inner" + elif index_value == d[u_]["outer"]: + out_crd_str += "_outer" + return out_crd_str + + def generate_datasets_code(f, tensor_formats, scope_lvl, tensor_info, tensor_format_parse, test_name): # Assuming the format is csr and csc: for ten in tensor_format_parse.return_all_tensors(): @@ -402,8 +418,12 @@ def finish_outputs(f, elements, nodes_completed): def generate_benchmarking_code(f, tensor_format_parse, test_name): - f.write("\n" + tab(1) + "def bench():\n") + f.write("\n") + f.write(tab(1) + "# Print out cycle count for pytest output\n") + f.write(tab(1) + "print(time_cnt)\n") + f.write(tab(1) + "def bench():\n") f.write(tab(2) + "time.sleep(0.01)\n\n") + f.write("\n") f.write(tab(1) + "extra_info = dict()\n") f.write(tab(1) + "extra_info[\"dataset\"] = " + get_dataset_name(test_name) + "\n") f.write(tab(1) + "extra_info[\"cycles\"] = time_cnt\n") @@ -422,7 +442,10 @@ def generate_benchmarking_code(f, tensor_format_parse, test_name): if d[u]["type"] in statistic_available: f.write(tab(1) + "sample_dict = " + d[u]["object"] + ".return_statistics()\n") f.write(tab(1) + "for k in sample_dict.keys():\n") - f.write(tab(2) + "extra_info[\"" + d[u]["object"] + "\" + \"_\" + k] = sample_dict[k]\n\n") + f.write(tab(2) + "extra_info[\"" + d[u]["object"] + "\" + \"/\" + k] = sample_dict[k]\n\n") + + f.write(tab(1) + "gen_gantt(extra_info, \"" + test_name + "\")\n") + f.write("\n") def generate_check_against_gold_code(f, tensor_format_parse, test_name): @@ -529,7 +552,7 @@ def get_all_files(directory_path): continue out_name.append(filename[0:-3]) # checking if it is a file - print(out_name[-1]) + print("Test Name:", out_name[-1]) if os.path.isfile(f): file_paths.append(f) return file_paths, out_name @@ -800,9 +823,13 @@ def get_all_files(directory_path): for u_ in data.get_parents()[v]: index_value = data.get_edge_data()[v][data.get_parents()[v].index(u_)][-1] if index_value == d[v]["inner"]: - f.write(tab(2) + d[v]["object"] + ".set_inner_crd" + "(" + d[u_]["object"] + ".out_crd())\n") + out_crd_str = get_out_crd_str(d, u_, index_value) + f.write(tab(2) + d[v]["object"] + ".set_inner_crd" + "(" + d[u_]["object"] + "." + + out_crd_str + "())\n") if index_value == d[v]["outer"]: - f.write(tab(2) + d[v]["object"] + ".set_outer_crd" + "(" + d[u_]["object"] + ".out_crd())\n") + out_crd_str = get_out_crd_str(d, u_, index_value) + f.write(tab(2) + d[v]["object"] + ".set_outer_crd" + "(" + d[u_]["object"] + "." 
+                        + out_crd_str + "())\n")
             nodes_updating_list.append(tab(2) + d[v]["object"] + ".update()\n")
             # f.write(tab(2) + d[v]["object"] + ".update()\n\n")
             data.add_done(v)
@@ -923,7 +950,6 @@ def get_all_files(directory_path):
                         if "val" not in data.get_edge_data()[v][i] and "spaccumulator" \
                                 in d[u_]["object"]:
                             local_index = data.get_edge_data()[v][i][-1]
-                            print(d[u_], " ", local_index, " ", apath)
                             if d[u_]["in0"] == local_index:
                                 local_cord = "_inner"
                             else:
diff --git a/scripts/generate_frostt_formats_onyx.sh b/scripts/generate_frostt_formats_onyx.sh
new file mode 100755
index 00000000..9a84bde2
--- /dev/null
+++ b/scripts/generate_frostt_formats_onyx.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+#SBATCH -N 1
+#SBATCH -t 360
+
+# ./scripts/generate_frostt_formats_onyx.sh <tensor_name_list>
+
+FORMATS=(
+    sss012
+)
+
+BENCHMARKS=(
+    # Using all tensor apps except tensor3_elemmul
+    # tensor3_elemadd
+    # tensor3_innerprod
+    tensor3_ttv
+    # tensor3_elemmul
+    # tensor3_mttkrp
+    # tensor3_ttm
+)
+
+# OTHERBENCHES='["tensor3_ttv"]'
+# export SUITESPARSE_PATH=/nobackup/owhsu/sparse-datasets/suitesparse/
+# export FROSTT_PATH=/nobackup/owhsu/sparse-datasets/frostt/
+# export SUITESPARSE_FORMATTED_PATH=/nobackup/owhsu/sparse-datasets/suitesparse-formatted
+# export FROSTT_FORMATTED_TACO_PATH=/nobackup/owhsu/sparse-datasets/frostt-formatted/taco-tensor
+# export FROSTT_FORMATTED_PATH=/nobackup/owhsu/sparse-datasets/frostt-formatted
+
+export SUITESPARSE_PATH=/nobackup/owhsu/sparse-datasets/suitesparse/
+export FROSTT_PATH=/home/avb03/sparse-datasets/tensors
+export SUITESPARSE_FORMATTED_PATH=/home/avb03/sam/SUITESPARSE_FORMATTED
+export FROSTT_FORMATTED_TACO_PATH=/home/avb03/sam/FROST_FORMATTED_TACO
+export FROSTT_FORMATTED_PATH=/home/avb03/sam/FROST_FORMATTED
+export TACO_TENSOR_PATH=/home/avb03/sam/TACO_TENSOR
+
+basedir=$(pwd)
+
+for i in ${!FORMATS[@]}; do
+    format=${FORMATS[$i]}
+    echo "Generating files for format $format..."
+
+    $basedir/compiler/taco/build/bin/taco-test sam.pack_$format
+    $basedir/compiler/taco/build/bin/taco-test sam.pack_other_frostt
+    for b in ${!BENCHMARKS[@]}; do
+        bench=${BENCHMARKS[$b]}
+        while read line; do
+
+            name=$line
+            echo "Generating input format files for $name..."
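
Assuming the hard-coded export paths above are adjusted for the local machine, the script is driven by a textfile of FROSTT tensor names (file name illustrative):

```bash
printf 'fb1k\nfb10k\n' > frostt_tensors.txt    # one tensor name per line
./scripts/generate_frostt_formats_onyx.sh frostt_tensors.txt
```
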
+ python3 $basedir/scripts/formatting/datastructure_tns.py -n $name -f $format -b $bench -hw + python3 $basedir/scripts/formatting/datastructure_tns.py -n $name -f $format --other -b $bench -hw + # if [[ $OTHERBENCHES =~ "$bench" ]]; then + # echo "Generating format of 'other' tensor" + # python3 $basedir/scripts/datastructure_tns_old.py -n $line -f ss01 --other -ss -b $bench -hw + # fi + chmod -R 775 $FROSTT_FORMATTED_PATH + done <$1 + done +done diff --git a/scripts/generate_synthetics_extensor.sh b/scripts/generate_synthetics_extensor.sh deleted file mode 100755 index fd6ed055..00000000 --- a/scripts/generate_synthetics_extensor.sh +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash - -# Vars -if [ -z "$2" ] -then - export SYNTHETIC_PATH="$(pwd)/synthetic/" -else - export SYNTHETIC_PATH="$2" -fi - -export SRC_PATH="$(pwd)/sam/onyx/synthetic/" - -# Create the main directories -mkdir -p $SYNTHETIC_PATH -pushd $SYNTHETIC_PATH -for vectype in "random" "blocks" "runs" -do - mkdir -p "${SYNTHETIC_PATH}/${vectype}/compressed/" - mkdir -p "${SYNTHETIC_PATH}/${vectype}/uncompressed/" - case $vectype in - random) - python ${SRC_PATH}/generate_random_mats.py --seed 0 --output_dir ${SYNTHETIC_PATH}/${vectype}/compressed/ --name B --shape 2000 --output_format CSF - python ${SRC_PATH}/generate_random_mats.py --seed 1 --output_dir ${SYNTHETIC_PATH}/${vectype}/compressed/ --name C --shape 2000 --output_format CSF - python ${SRC_PATH}/generate_random_mats.py --seed 0 --output_dir ${SYNTHETIC_PATH}/${vectype}/uncompressed/ --name B --shape 2000 --output_format UNC - python ${SRC_PATH}/generate_random_mats.py --seed 1 --output_dir ${SYNTHETIC_PATH}/${vectype}/uncompressed/ --name C --shape 2000 --output_format UNC - ;; - blocks) - for bs in 1 2 5 10 20 30 40 50 75 100 200 300 400 - do - nnz=400 - python ${SRC_PATH}/generate_blocks.py --seed 0 --output_dir ${SYNTHETIC_PATH}/${vectype}/compressed/ --number_nonzeros $nnz --len_blocks $bs --shape 2000 --output_format CSF - python ${SRC_PATH}/generate_blocks.py --seed 0 --output_dir ${SYNTHETIC_PATH}/${vectype}/uncompressed/ --number_nonzeros $nnz --len_blocks $bs --shape 2000 --output_format UNC - # python ${SRC_PATH}/generate_blocks.py --seed 0 --output_dir ${SYNTHETIC_PATH}/${vectype}/compressed/ --shape 2000 --output_format CSF - # python ${SRC_PATH}/generate_blocks.py --seed 0 --output_dir ${SYNTHETIC_PATH}/${vectype}/uncompressed/ --shape 2000 --output_format UNC - done - ;; - runs) - for rl in 1 2 5 10 20 30 40 50 75 100 200 300 400 - do - python ${SRC_PATH}/generate_runs.py --seed 0 --output_dir ${SYNTHETIC_PATH}/${vectype}/compressed/ --shape 2000 --number_nonzeros 400 --run_length $rl --output_format CSF - python ${SRC_PATH}/generate_runs.py --seed 0 --output_dir ${SYNTHETIC_PATH}/${vectype}/uncompressed/ --shape 2000 --number_nonzeros 400 --run_length $rl --output_format UNC - # python ${SRC_PATH}/generate_runs.py --seed 0 --output_dir ${SYNTHETIC_PATH}/${vectype}/compressed/ --output_format CSF --run_lengths 100 200 - # python ${SRC_PATH}/generate_runs.py --seed 0 --output_dir ${SYNTHETIC_PATH}/${vectype}/uncompressed/ --output_format UNC --run_lengths 100 200 - done - ;; - esac -done -popd - -# Now generate the matrices in both DCSR/DCSC formats -pushd $SYNTHETIC_PATH - -mkdir -p "${SYNTHETIC_PATH}/matrix/DCSR" -mkdir -p "${SYNTHETIC_PATH}/matrix/DCSC" -mkdir -p "${SYNTHETIC_PATH}/matrix/DENSE" - -i=250 -j=250 -k=100 - -sparsity="0.95" - -python ${SRC_PATH}/generate_random_mats.py --seed 0 --sparsity $sparsity --output_dir ${SYNTHETIC_PATH}/matrix/DCSR/ 
--name B --shape $i $k --output_format CSF -python ${SRC_PATH}/generate_random_mats.py --seed 0 --sparsity $sparsity --output_dir ${SYNTHETIC_PATH}/matrix/DCSC/ --name B --shape $i $k --output_format CSF --transpose -python ${SRC_PATH}/generate_random_mats.py --seed 0 --sparsity $sparsity --output_dir ${SYNTHETIC_PATH}/matrix/DENSE/ --name B --shape $i $k --output_format UNC - -python ${SRC_PATH}/generate_random_mats.py --seed 1 --sparsity $sparsity --output_dir ${SYNTHETIC_PATH}/matrix/DCSR/ --name C --shape $k $j --output_format CSF -python ${SRC_PATH}/generate_random_mats.py --seed 1 --sparsity $sparsity --output_dir ${SYNTHETIC_PATH}/matrix/DCSC/ --name C --shape $k $j --output_format CSF --transpose -python ${SRC_PATH}/generate_random_mats.py --seed 1 --sparsity $sparsity --output_dir ${SYNTHETIC_PATH}/matrix/DENSE/ --name C --shape $k $j --output_format UNC - -popd \ No newline at end of file diff --git a/scripts/get_data/README.md b/scripts/get_data/README.md new file mode 100644 index 00000000..116f1ae1 --- /dev/null +++ b/scripts/get_data/README.md @@ -0,0 +1,11 @@ +# Get Data + +The `scripts/get_data` folder contains scripts used to download and unpack +datasets (SuiteSparse matrices and FROSTT tensors) + +1. `download_frostt.sh` - Download and unpack FROSTT tns files into + `$FROSTT_PATH` +2. `download_suitesparse.sh` - Download SuiteSparse mtx files into + `$SUITESPARSE_PATH` +3. `unpack_suitesparse.sh` - Unpack SuiteSparse mtx files in `$SUITESPARSE_PATH` based on a file +4. `unpack_suitesparse_all.sh` - Unpack SuiteSparse mtx files in `$SUITESPARSE_PATH` for all `*.tar.gz` files that exist diff --git a/scripts/download_frostt.sh b/scripts/get_data/download_frostt.sh similarity index 95% rename from scripts/download_frostt.sh rename to scripts/get_data/download_frostt.sh index bcdda91f..0f5e57fb 100755 --- a/scripts/download_frostt.sh +++ b/scripts/get_data/download_frostt.sh @@ -3,6 +3,8 @@ #SBATCH -t 360 #SBATCH -p lanka-v3 +# Command: ./scripts/get_data/download_frostt.sh + set -e TENSOR_NAMES=( @@ -37,7 +39,7 @@ TENSOR_URLS=( "https://s3.us-east-2.amazonaws.com/frostt/frostt_data/vast-2015-mc1/vast-2015-mc1-5d.tns.gz" ) -outdir=/data/scratch/owhsu/datasets/frostt +outdir=$FROSTT_PATH mkdir -p $outdir diff --git a/scripts/download_suitesparse.sh b/scripts/get_data/download_suitesparse.sh similarity index 99% rename from scripts/download_suitesparse.sh rename to scripts/get_data/download_suitesparse.sh index e07b7d0c..5f1d1f6b 100755 --- a/scripts/download_suitesparse.sh +++ b/scripts/get_data/download_suitesparse.sh @@ -2,7 +2,9 @@ #SBATCH -N 1 #SBATCH -t 360 -outdir=/nobackup/owhsu/sparse-datasets/suitesparse +# Command: ./scripts/get_data/download_suitesparse.sh + +outdir=$SUITESPARSE_PATH mkdir -p $outdir cd $outdir diff --git a/scripts/generate_synthetics.sh b/scripts/get_data/generate_synthetics.sh similarity index 100% rename from scripts/generate_synthetics.sh rename to scripts/get_data/generate_synthetics.sh diff --git a/scripts/unpack_suitesparse.sh b/scripts/get_data/unpack_suitesparse.sh similarity index 81% rename from scripts/unpack_suitesparse.sh rename to scripts/get_data/unpack_suitesparse.sh index 1f91628e..1e089784 100755 --- a/scripts/unpack_suitesparse.sh +++ b/scripts/get_data/unpack_suitesparse.sh @@ -2,6 +2,8 @@ #SBATCH -N 1 #SBATCH -t 360 +# Command: ./scripts/get_data/unpack_suitesparse.sh + pushd . 
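
Taken together, the `scripts/get_data/` scripts give the following download-and-unpack flow (paths and list file illustrative):

```bash
export SUITESPARSE_PATH=/data/suitesparse   # illustrative path
export FROSTT_PATH=/data/frostt             # illustrative path

./scripts/get_data/download_suitesparse.sh           # fetch *.tar.gz into $SUITESPARSE_PATH
./scripts/get_data/unpack_suitesparse.sh names.txt   # unpack only the matrices listed in names.txt
./scripts/get_data/download_frostt.sh                # fetch and unpack tns files into $FROSTT_PATH
```
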
cd $SUITESPARSE_PATH diff --git a/scripts/unpack_suitesparse_all.sh b/scripts/get_data/unpack_suitesparse_all.sh similarity index 79% rename from scripts/unpack_suitesparse_all.sh rename to scripts/get_data/unpack_suitesparse_all.sh index 3e5e06f1..64030d31 100644 --- a/scripts/unpack_suitesparse_all.sh +++ b/scripts/get_data/unpack_suitesparse_all.sh @@ -2,6 +2,8 @@ #SBATCH -N 1 #SBATCH -t 360 +# Command: ./scripts/get_data/unpack_suitesparse_all.sh + cd $SUITESPARSE_PATH for f in *.tar.gz; do diff --git a/scripts/prepare_files.sh b/scripts/prepare_files.sh deleted file mode 100755 index bed6796c..00000000 --- a/scripts/prepare_files.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/bin/bash -#SBATCH -N 1 -#SBATCH --mem 120000 -#SBATCH -p lanka-v3 -#SBATCH --exclusive -#SBATCH --mail-user=oliviahsu1107@gmail.com - -basedir=$(pwd) - -rm -rf $basedir/tiles/* - -./scripts/tile_ext.sh $1 memory_config_extensor_17M_llb.yaml - -python scripts/generate_gold_matmul_tiled.py --yaml_name memory_config_extensor_17M_llb.yaml diff --git a/scripts/prepare_files_no_gold.sh b/scripts/prepare_files_no_gold.sh deleted file mode 100755 index 13e3de8c..00000000 --- a/scripts/prepare_files_no_gold.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -#SBATCH -N 1 -#SBATCH --mem 120000 -#SBATCH -p lanka-v3 -#SBATCH --exclusive -#SBATCH --mail-user=oliviahsu1107@gmail.com - -rm -rf $basedir/tiles/* - -./scripts/tile_ext.sh $1 memory_config_extensor_17M_llb.yaml diff --git a/scripts/prepare_tiles_onyx.sh b/scripts/prepare_tiles_onyx.sh new file mode 100755 index 00000000..7bea7baf --- /dev/null +++ b/scripts/prepare_tiles_onyx.sh @@ -0,0 +1,33 @@ +#!/bin/bash +#sbatch -n 1 +#sbatch --mem 120000 +#sbatch -p lanka-v3 +#sbatch --exclusive + + +basedir=$(pwd) +yaml_fname=memory_config_onyx.yaml +line=random_sparsity + +nnz=$1 +dim=$2 +echo "running for point nnz=$nnz and dimsize=$dim" + +export sam_home=$basedir +export tiled_suitesparse_formatted_path=${sam_home}/tiles/matmul_ikj/formatted +export tiled_output_path=${sam_home}/tiles/matmul_ikj/output/ + +pushd . + +mkdir extensor_mtx +cd extensor_mtx +python ../sam/onyx/synthetic/generate_fixed_nnz_mats.py --nnz $nnz --dim $dim +cd .. + +mkdir -p $path + +mkdir -p $basedir/tiles/ +rm -rf $basedir/tiles/* + +./scripts/prepare_files.sh extensor_${nnz}_${dim}.mtx $yaml_fname + diff --git a/scripts/pytest_frostt.sh b/scripts/pytest_frostt.sh deleted file mode 100755 index 39f4366d..00000000 --- a/scripts/pytest_frostt.sh +++ /dev/null @@ -1,114 +0,0 @@ -#!/bin/bash -#SBATCH -N 1 -#SBATCH -t 360 -outdir=/nobackup/owhsu/sparse-datasets/frostt-formatted - -DATASET_NAMES=( - facebook - fb10k - fb1k - nell-1 - nell-2 - taco-tensor -) - -errors=() -RED='\033[0;31m' -NC='\033[0m' # No Color - -export FROSTT_PATH=/nobackup/owhsu/sparse-datasets/frostt -export FROSTT_FORMATTED_PATH=$outdir - -mkdir -p $outdir -cd ./sam/sim - -for i in ${!DATASET_NAMES[@]}; do - name=${DATASET_NAMES[$i]} - - echo "Testing $name..." -# pytest -k test_mat_mul_ijk_csr_full_i --ssname $name -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} matmul_ijk_full") -# fi - -# pytest -k test_mat_identity_i --ssname $name -s -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} matmul_ijk") -# fi - - pytest -k test_tensor --frosttname $name -s -vv #--debug-sim - status=$? - if [ $status -gt 0 ] - then - errors+=("${name} matmul_ijk") - fi - - -# -# pytest -k test_matmul_ijk_i --ssname $name -s #--debug-sim -# status=$? 
-# if [ $status -gt 0 ] -# then -# errors+=("${name} matmul_ijk") -# fi - -# pytest -k test_mat_elemmul_i --ssname $name -s -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} matmul_ijk") -# fi - - -# pytest -k test_tensor3_elemmul_i --ssname $name -s -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} matmul_ijk") -# fi - -# pytest -k test_matmul_jik_i --ssname $name -s -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} matmul_ijk") -# fi - -# pytest -k test_matmul_jki_i --ssname $name -s -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} matmul_ijk") -# fi - - - - -# pytest -k test_mat_identity_i --ssname $name -s -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} mat_identity") -# fi - -# pytest -k test_mat_elemmul_i --ssname $name -s -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} mat_identity") -# fi - - - -done - -echo -e "${RED}Failed tests:" -for i in ${!errors[@]}; do - error=${errors[$i]} - echo -e "${RED}$error," -done -echo -e "${NC}" diff --git a/scripts/pytest_suitesparse.sh b/scripts/pytest_suitesparse.sh deleted file mode 100755 index e040646c..00000000 --- a/scripts/pytest_suitesparse.sh +++ /dev/null @@ -1,105 +0,0 @@ -#!/bin/bash -#SBATCH -N 1 -#SBATCH -t 360 -outdir=/nobackup/owhsu/sparse-datasets/suitesparse-formatted - -DATASET_NAMES=( -# bcsstm04 - bcsstm02 - bcsstm03 - lpi_bgprtr - cage4 - klein-b1 - GD02_a - GD95_b - Hamrle1 - LF10 - lpi_itest2 - lp_scsd1 -) - -errors=() -RED='\033[0;31m' -NC='\033[0m' # No Color - - -mkdir -p $outdir -cd ./sam/sim - -for i in ${!DATASET_NAMES[@]}; do - name=${DATASET_NAMES[$i]} - - echo "Testing $name..." - - pytest ./test/final-apps --ssname $name -s --check-gold #--debug-sim - status=$? - if [ $status -gt 0 ] - then - errors+=("${name}") - fi - - -# -# pytest -k test_matmul_ijk_i --ssname $name -s #--debug-sim -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} matmul_ijk") -# fi - -# pytest -k test_mat_elemmul_i --ssname $name -s -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} matmul_ijk") -# fi - - -# pytest -k test_tensor3_elemmul_i --ssname $name -s -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} matmul_ijk") -# fi - -# pytest -k test_matmul_jik_i --ssname $name -s -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} matmul_ijk") -# fi - -# pytest -k test_matmul_jki_i --ssname $name -s -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} matmul_ijk") -# fi - - - - -# pytest -k test_mat_identity_i --ssname $name -s -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} mat_identity") -# fi - -# pytest -k test_mat_elemmul_i --ssname $name -s -# status=$? -# if [ $status -gt 0 ] -# then -# errors+=("${name} mat_identity") -# fi - - - -done - -echo -e "${RED}Failed tests:" -for i in ${!errors[@]}; do - error=${errors[$i]} - echo -e "${RED}$error," -done -echo -e "${NC}" diff --git a/scripts/run_cpu/README.md b/scripts/run_cpu/README.md new file mode 100644 index 00000000..2b05a73e --- /dev/null +++ b/scripts/run_cpu/README.md @@ -0,0 +1,10 @@ +# CPU Baseline Scripts + +The `scripts/run_cpu/` folder contains scripts used to run +the CPU baseline tests. These are needed to compare +against SAM and/or HW generated using SAM (e.g. Onyx). + +1. `frostt_runner.sh` - Script that runs the frostt tests + in taco on the CPU +2. 
`suitesparse_runner.sh` - Script that runs the
+   SuiteSparse tests in taco on the CPU.
diff --git a/scripts/frostt_runner.sh b/scripts/run_cpu/frostt_runner.sh
similarity index 78%
rename from scripts/frostt_runner.sh
rename to scripts/run_cpu/frostt_runner.sh
index 9e68b46c..dbc53445 100755
--- a/scripts/frostt_runner.sh
+++ b/scripts/run_cpu/frostt_runner.sh
@@ -4,6 +4,8 @@
 #SBATCH -p lanka-v3
 #SBATCH --exclusive
 
+# ./scripts/run_cpu/frostt_runner.sh <machine>
+# Arg1 - Which machine is being used (0:local, 1:Lanka, 2:Kiwi/Neva)
 
 DATASET_NAMES=(
     fb1k
@@ -13,18 +15,16 @@ DATASET_NAMES=(
     nell-1
 )
 
+sspath=$SUITESPARSE_PATH
 cwd=$(pwd)
 # LANKA
 if [ $1 -eq 1 ]; then
-    sspath=/data/scratch/changwan/florida_all/.
     lanka=ON
     neva=OFF
 elif [ $1 -eq 2 ]; then
-    sspath=/nobackup/owhsu/sparse-datasets/suitesparse
     lanka=OFF
     neva=ON
 else
-    sspath=cwd/.
     lanka=OFF
     neva=OFF
 fi
diff --git a/scripts/suitesparse_runner.sh b/scripts/run_cpu/suitesparse_runner.sh
similarity index 69%
rename from scripts/suitesparse_runner.sh
rename to scripts/run_cpu/suitesparse_runner.sh
index 33b95b6b..1e11e6ae 100755
--- a/scripts/suitesparse_runner.sh
+++ b/scripts/run_cpu/suitesparse_runner.sh
@@ -4,21 +4,26 @@
 #SBATCH -p lanka-v3
 #SBATCH --exclusive
 
+# ./scripts/run_cpu/suitesparse_runner.sh <tensor_names.txt> <machine>
+# Arg1 - Textfile with names of suitesparse tensors to run
+# Arg2 - Which machine is being used (0:local, 1:Lanka, 2:Kiwi/Neva)
+
 set -u
 
 cwd=$(pwd)
 sspath=$SUITESPARSE_PATH
+
 # LANKA
 if [ $2 -eq 1 ]; then
     lanka=ON
     neva=OFF
 elif [ $2 -eq 2 ]; then
     lanka=OFF
     neva=ON
 else
     lanka=OFF
     neva=OFF
 fi
 
 out=suitesparse-bench/taco
 
@@ -27,9 +32,7 @@ mkdir -p "$out"
 while read line; do
     if [ $2 -eq 1 ]; then
         matrix="$sspath/$line/$line.mtx"
-    elif [ $2 -eq 2 ]; then
-        matrix="$sspath/$line.mtx"
-    else
+    else
         matrix="$sspath/$line.mtx"
     fi
     csvout="$out/result-$line.csv"
diff --git a/scripts/run_onyx/README.md b/scripts/run_onyx/README.md
new file mode 100644
index 00000000..82634ba8
--- /dev/null
+++ b/scripts/run_onyx/README.md
@@ -0,0 +1,6 @@
+# Onyx Scripts
+
+The `scripts/run_onyx/` folder contains scripts used to
+run Onyx test benches from the [`aha/`](https://github.com/StanfordAHA/aha) flow.
+
+1. `sam_hw_suitesparse_runner.sh` - Script that runs the SuiteSparse tests in Onyx (from the aha repository).
diff --git a/scripts/sam_hw_suitesparse_runner.sh b/scripts/run_onyx/sam_hw_suitesparse_runner.sh
similarity index 77%
rename from scripts/sam_hw_suitesparse_runner.sh
rename to scripts/run_onyx/sam_hw_suitesparse_runner.sh
index 3981db0f..b9a53565 100755
--- a/scripts/sam_hw_suitesparse_runner.sh
+++ b/scripts/run_onyx/sam_hw_suitesparse_runner.sh
@@ -6,6 +6,12 @@
 # Command: ./scripts/sam_hw_suitesparse_runner.sh <0|1|2>
 # Where 0 = local, 1 = Lanka, 2 = kiwi/neva
 
+# This should be run from the sam/ directory
+
+# This script:
+# 1. Formats the matrices
+# 2. Then runs build_tb in garnet
+
 set -u
 
 BENCHMARKS=(
@@ -21,26 +27,21 @@ errors=()
 RED='\033[0;31m'
 NC='\033[0m' # No Color
 
+mkdir -p $TACO_TENSOR_PATH
+mkdir -p $SUITESPARSE_FORMATTED_PATH
+mkdir -p $FROSTT_FORMATTED_TACO_PATH
+mkdir -p $FROSTT_FORMATTED_PATH
+
 # LANKA
 if [ $2 -eq 1 ]; then
-    export SUITESPARSE_PATH=/data/scratch/changwan/florida_all
-    export FROSTT_PATH=/data/scratch/owhsu/datasets/frostt
-    export TACO_TENSOR_PATH=/data/scratch/owhsu/datasets
-    export SUITESPARSE_FORMATTED_PATH=/data/scratch/owhsu/datasets/suitesparse-formatted
-    export FROSTT_FORMATTED_TACO_PATH=/data/scratch/owhsu/datasets/frostt-formatted/taco-tensor
-    export FROSTT_FORMATTED_PATH=/data/scratch/owhsu/datasets/frostt-formatted
-
-    mkdir -p $TACO_TENSOR_PATH
-    mkdir -p $SUITESPARSE_FORMATTED_PATH
-    mkdir -p $FROSTT_FORMATTED_TACO_PATH
-    mkdir -p $FROSTT_FORMATTED_PATH
-
     lanka=ON
     neva=OFF
+# KIWI/NEVA
 elif [ $2 -eq 2 ]; then
     # NEVA/KIWI
     lanka=OFF
     neva=ON
+# Local Machine
 else
     lanka=OFF
     neva=OFF
diff --git a/scripts/run_sam_sim/README.md b/scripts/run_sam_sim/README.md
new file mode 100644
index 00000000..2331a759
--- /dev/null
+++ b/scripts/run_sam_sim/README.md
@@ -0,0 +1,36 @@
+# Scripts to Run SAM Simulations
+
+All scripts should ultimately run pytest to test the SAM
+simulator applications.
+
+The `scripts/run_sam_sim/` folder contains scripts that run the SAM simulator for the following datasets:
+1. SuiteSparse
+2. FROSTT
+3. Synthetically generated data
+
+1. `pytest_frostt.sh` - Script that runs ALL pytest tests beginning with the
+   name `test_tensor*` under `sam/sim/test/` with the FROSTT tensors.
+2. `pytest_frostt_with_benchmarks.sh` - Script that runs only select pytest
+   benchmarks under `sam/sim/test/` with the FROSTT tensors.
+3. `pytest_suitesparse.sh` - Script that runs ALL pytest tests in
+   `sam/sim/test/final-apps` with gold checking enabled for the SuiteSparse
+   matrices provided in `tensor_names.txt`.
+4. `pytest_suitesparse_with_benchmarks.sh` - Script that runs select
+   SuiteSparse pytest benchmarks under `sam/sim/test/apps/`. This script has gold checking
+   disabled and aggregates results into CSVs.
+5. `run_suitesparse_final.sh` - Script that runs ALL SuiteSparse final tests in
+   `sam/sim/test/final-apps/`.
+6. `run_suitesparse_generated.sh` - Script that runs ALL SuiteSparse generated tests in
+   `sam/sim/test/apps/`.
+7. `run_suitesparse.sh` - Script that formats input SuiteSparse matrices and then runs
+   pytest on all SuiteSparse benchmarks in `sam/sim/test/apps`.
+8. `run_synthetics.sh` - Script that runs all of the synthetic benchmarks from
+   the ASPLOS 2023 SAM paper.
+9. `sam_frostt_runner.sh` - Script that formats, runs, and generates CSVs for
+   all FROSTT benchmarks.
+10. `sam_suitesparse_runner.sh` - Script that formats, runs, and generates CSVs
+    for all SuiteSparse benchmarks in `final-apps`.
+11. `sam_suitesparse_runner_sddmmonly.sh` - Script that formats, runs, and
+    generates CSVs for the `final-apps` SDDMM SuiteSparse benchmark only.
+12. `suitesparse_validator.sh` - Script that runs the CPU benchmarks and then
+    the SAM pytest benchmarks in `apps` on SuiteSparse data.
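
As a concrete starting point, a minimal SuiteSparse simulation run looks like this (paths illustrative; `tensor_names.txt` holds one matrix name per line):

```bash
# Run from the top of the sam/ repository.
export SUITESPARSE_PATH=/data/suitesparse                      # illustrative path
export SUITESPARSE_FORMATTED_PATH=/data/suitesparse-formatted  # illustrative path

./scripts/run_sam_sim/run_suitesparse.sh tensor_names.txt      # format, then run the generated apps
./scripts/run_sam_sim/pytest_suitesparse.sh tensor_names.txt   # final-apps with gold checking

# Or invoke a single simulator test directly:
cd sam/sim
pytest test/final-apps --ssname bcsstm02 -s --check-gold
```
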
diff --git a/scripts/run_sam_sim/pytest_frostt.sh b/scripts/run_sam_sim/pytest_frostt.sh new file mode 100755 index 00000000..ba78463f --- /dev/null +++ b/scripts/run_sam_sim/pytest_frostt.sh @@ -0,0 +1,37 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH -t 360 + +# Run from sam/ repo +# ./scripts/run_sam_sim/pytest_frostt.sh + +# Script that runs ALL test_tensor* pytest tests under sam/sim/test + +outdir=$FROSTT_FORMATTED_PATH + +errors=() +RED='\033[0;31m' +NC='\033[0m' # No Color + +mkdir -p $outdir +cd ./sam/sim + +while read line; do + name=$line + + echo "Testing $name..." + + pytest -k test_tensor --frosttname $name -s -vv #--debug-sim + status=$? + if [ $status -gt 0 ] + then + errors+=("${name} test") + fi +done <$1 + +echo -e "${RED}Failed tests:" +for i in ${!errors[@]}; do + error=${errors[$i]} + echo -e "${RED}$error," +done +echo -e "${NC}" diff --git a/scripts/pytest_frostt_with_benchmarks.sh b/scripts/run_sam_sim/pytest_frostt_with_benchmarks.sh similarity index 57% rename from scripts/pytest_frostt_with_benchmarks.sh rename to scripts/run_sam_sim/pytest_frostt_with_benchmarks.sh index 0ba9774d..8056c3eb 100755 --- a/scripts/pytest_frostt_with_benchmarks.sh +++ b/scripts/run_sam_sim/pytest_frostt_with_benchmarks.sh @@ -2,6 +2,7 @@ #SBATCH -N 1 #SBATCH -t 360 +# ./scripts/run_sam_sim/pytest_frostt_with_benchmarks.sh BENCHMARKS=( tensor3_elemmul @@ -13,27 +14,14 @@ BENCHMARKS=( tensor_mttkrp ) -DATASET_NAMES=( - facebook - fb10k - fb1k - nell-1 - nell-2 - taco-tensor -) - -outdir=/nobackup/owhsu/sparse-datasets/frostt-formatted - -export FROSTT_PATH=/nobackup/owhsu/sparse-datasets/frostt -export FROSTT_FORMATTED_PATH=$outdir - +outdir=$FROSTT_FORMATTED_PATH errors=() RED='\033[0;31m' NC='\033[0m' # No Color -cwd=$(pwd) +basedir=$(pwd) resultdir=results @@ -41,27 +29,27 @@ cd ./sam/sim for b in ${!BENCHMARKS[@]}; do bench=${BENCHMARKS[$b]} - path=$cwd/$resultdir/$bench + path=$basedir/$resultdir/$bench - mkdir -p $cwd/$resultdir/$bench + mkdir -p $basedir/$resultdir/$bench echo "Testing $bench..." - for i in ${!DATASET_NAMES[@]}; do - name=${DATASET_NAMES[$i]} + while read line; do + name=$line echo "Testing $name..." pytest test/apps/test_$bench.py --ssname $name -s --benchmark-json=$path/$name.json - python $cwd/scripts/converter.py --json_name $path/$name.json + python $basedir/scripts/util/converter.py --json_name $path/$name.json status=$? if [ $status -gt 0 ] then errors+=("${name}, ${bench}") fi - done + done <$1 - python $cwd/scripts/bench_csv_aggregator.py $path $cwd/suitesparse_$bench.csv + python $basedir/scripts/util/bench_csv_aggregator.py $path $basedir/suitesparse_$bench.csv done diff --git a/scripts/run_sam_sim/pytest_suitesparse.sh b/scripts/run_sam_sim/pytest_suitesparse.sh new file mode 100755 index 00000000..1e440beb --- /dev/null +++ b/scripts/run_sam_sim/pytest_suitesparse.sh @@ -0,0 +1,36 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH -t 360 + +# ./scripts/run_sam_sim/pytest_suitesparse.sh + + +outdir=$SUITESPARSE_FORMATTED_PATH + +errors=() +RED='\033[0;31m' +NC='\033[0m' # No Color + + +mkdir -p $outdir +cd ./sam/sim + +while read line; do + name=$line + + echo "Testing $name..." + + pytest ./test/final-apps --ssname $name -s --check-gold #--debug-sim + status=$? 
+ if [ $status -gt 0 ] + then + errors+=("${name}") + fi +done < $1 + +echo -e "${RED}Failed tests:" +for i in ${!errors[@]}; do + error=${errors[$i]} + echo -e "${RED}$error," +done +echo -e "${NC}" diff --git a/scripts/pytest_suitesparse_with_benchmarks.sh b/scripts/run_sam_sim/pytest_suitesparse_with_benchmarks.sh similarity index 63% rename from scripts/pytest_suitesparse_with_benchmarks.sh rename to scripts/run_sam_sim/pytest_suitesparse_with_benchmarks.sh index 09cc2b67..e0e38f12 100755 --- a/scripts/pytest_suitesparse_with_benchmarks.sh +++ b/scripts/run_sam_sim/pytest_suitesparse_with_benchmarks.sh @@ -2,6 +2,8 @@ #SBATCH -N 1 #SBATCH -t 360 +# ./scripts/run_sam_sim/pytest_suitesparse_with_benchmarks.sh + BENCHMARKS=( matmul_kij matmul_kji @@ -22,27 +24,11 @@ BENCHMARKS=( ) -# FIXME: Need to change this to take in an input file as in taco side -DATASET_NAMES=( - bcsstm04 - bcsstm02 - bcsstm03 - lpi_bgprtr - cage4 - klein-b1 - GD02_a - GD95_b - Hamrle1 - LF10 -) - errors=() RED='\033[0;31m' NC='\033[0m' # No Color -export SUITESPARSE_PATH=/nobackup/owhsu/sparse-datasets/suitesparse -export FROSTT_PATH=/nobackup/owhsu/sparse-datasets/frostt-formatted cwd=$(pwd) resultdir=results @@ -56,13 +42,13 @@ for b in ${!BENCHMARKS[@]}; do mkdir -p $cwd/$resultdir/$bench echo "Testing $bench..." - for i in ${!DATASET_NAMES[@]}; do - name=${DATASET_NAMES[$i]} + while read line; do + name=$line echo "Testing $name..." pytest test/apps/test_$bench.py --ssname $name -s --benchmark-json=$path/$name.json - python $cwd/scripts/converter.py --json_name $path/$name.json + python $cwd/scripts/util/converter.py --json_name $path/$name.json status=$? if [ $status -gt 0 ] @@ -71,9 +57,9 @@ for b in ${!BENCHMARKS[@]}; do fi done - python $cwd/scripts/bench_csv_aggregator.py $path $cwd/suitesparse_$bench.csv + python $cwd/scripts/util/bench_csv_aggregator.py $path $cwd/suitesparse_$bench.csv -done +done < $1 echo -e "${RED}Failed tests:" for i in ${!errors[@]}; do diff --git a/scripts/run_suitesparse.sh b/scripts/run_sam_sim/run_suitesparse.sh similarity index 61% rename from scripts/run_suitesparse.sh rename to scripts/run_sam_sim/run_suitesparse.sh index 43cc9fb0..a6b26601 100755 --- a/scripts/run_suitesparse.sh +++ b/scripts/run_sam_sim/run_suitesparse.sh @@ -2,38 +2,28 @@ #SBATCH -N 1 #SBATCH -t 360 +# 1. Formats input files +# 2. Runs suitesparse sam sims in pytest + +# ./scripts/run_sam_sim/run_suitesparse.sh + # THIS FILE MUST BE RUN FROM sam/ location -outdir=/nobackup/owhsu/sparse-datasets/suitesparse-formatted +outdir=$SUITESPARSE_FORMATTED_PATH basedir=$(pwd) -DATASET_NAMES=( - bcsstm04 - bcsstm02 - bcsstm03 - lpi_bgprtr - cage4 - klein-b1 - GD02_a - GD95_b - Hamrle1 - LF10 -) - errors=() RED='\033[0;31m' NC='\033[0m' # No Color -export SUITESPARSE_PATH=/nobackup/owhsu/sparse-datasets/suitesparse -export SUITESPARSE_FORMATTED_PATH=$outdir mkdir -p $outdir -for i in ${!DATASET_NAMES[@]}; do - name=${DATASET_NAMES[$i]} +while read line; do + name=$line cd $outdir echo "Generating input format files for $name..." - python $basedir/scripts/datastructure_suitesparse.py -n $name + python $basedir/scripts/formatting/datastructure_suitesparse.py -n $name chgrp -R sparsity $outdir chmod -R 777 $outdir @@ -50,7 +40,7 @@ for i in ${!DATASET_NAMES[@]}; do cd $outdir echo "Removing format files for $name..." 
rm ./$name*.txt -done +done < $1 echo -e "${RED}Failed tests:" for i in ${!errors[@]}; do diff --git a/scripts/run_sam_sim/run_suitesparse_final.sh b/scripts/run_sam_sim/run_suitesparse_final.sh new file mode 100755 index 00000000..eac4e371 --- /dev/null +++ b/scripts/run_sam_sim/run_suitesparse_final.sh @@ -0,0 +1,16 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH --mem 120000 +#SBATCH -p lanka-v3 +#SBATCH --exclusive + +# ./scripts/run_sam_sim/run_suitesparse_final.sh + +pushd . +cd sam/sim + +while read line; do + pytest test/final-apps/ --ssname $line --check-gold +done < $1 + +popd diff --git a/scripts/run_sam_sim/run_suitesparse_generated.sh b/scripts/run_sam_sim/run_suitesparse_generated.sh new file mode 100755 index 00000000..4f361850 --- /dev/null +++ b/scripts/run_sam_sim/run_suitesparse_generated.sh @@ -0,0 +1,16 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH --mem 120000 +#SBATCH -p lanka-v3 +#SBATCH --exclusive + +pushd . +cd sam/sim + +echo $1 + +while read line; do + pytest test/apps/ --ssname $line --check-gold +done < $1 + +popd diff --git a/scripts/run_synthetics.sh b/scripts/run_sam_sim/run_synthetics.sh similarity index 65% rename from scripts/run_synthetics.sh rename to scripts/run_sam_sim/run_synthetics.sh index 35004921..5e5b356a 100755 --- a/scripts/run_synthetics.sh +++ b/scripts/run_sam_sim/run_synthetics.sh @@ -2,12 +2,20 @@ #SBATCH -N 1 #SBATCH -t 360 +# ./scripts/run_sam_sim/run_synthetics.sh + +# Req: Need to run this after synthetic/ is generated +# 1. Runs all of the synthetic tests from the ASPLOS 2023 SAM paper + +basedir=$(pwd) +resultdir=results + # Vars -if [ -z "$2" ] +if [ -z "$1" ] then - export SYNTHETIC_PATH="$(pwd)/synthetic/" + export SYNTHETIC_PATH="$basedir/synthetic/" else - export SYNTHETIC_PATH="$2" + export SYNTHETIC_PATH="$1" fi BENCHMARKS=( @@ -19,8 +27,6 @@ BENCHMARKS=( test_vec_elemmul_uncompressed ) -cwd=$(pwd) -resultdir=results for b in ${!BENCHMARKS[@]}; do bench=${BENCHMARKS[$b]} @@ -30,8 +36,8 @@ for b in ${!BENCHMARKS[@]}; do echo "Testing $bench..." pytest sam/sim/test/study-apps/$bench.py --synth --check-gold -k "random-40 or 0.2-blocks or 0.2-runs" --benchmark-json="$path/$bench.json" - python $cwd/scripts/converter.py --json_name $path/$bench.json - python $cwd/scripts/bench_csv_aggregator.py $path $cwd/SYNTH_OUT_ACCEL.csv + python $basedir/scripts/util/converter.py --json_name $path/$bench.json + python $basedir/scripts/util/bench_csv_aggregator.py $path $basedir/SYNTH_OUT_ACCEL.csv done @@ -44,7 +50,7 @@ BENCHMARKS=( test_reorder_matmul_kji ) -cwd=$(pwd) +basedir=$(pwd) resultdir=results_reorder for b in ${!BENCHMARKS[@]}; do @@ -55,8 +61,8 @@ for b in ${!BENCHMARKS[@]}; do echo "Testing $bench..." pytest sam/sim/test/reorder-study/$bench.py --synth --check-gold --benchmark-json="$path/$bench.json" - python $cwd/scripts/converter.py --json_name $path/$bench.json - python $cwd/scripts/bench_csv_aggregator.py $path $cwd/SYNTH_OUT_REORDER.csv + python $basedir/scripts/util/converter.py --json_name $path/$bench.json + python $basedir/scripts/util/bench_csv_aggregator.py $path $basedir/SYNTH_OUT_REORDER.csv done @@ -66,7 +72,7 @@ BENCHMARKS=( test_mat_sddmm_unfused ) -cwd=$(pwd) +basedir=$(pwd) resultdir=results_fusion for b in ${!BENCHMARKS[@]}; do @@ -77,8 +83,8 @@ for b in ${!BENCHMARKS[@]}; do echo "Testing $bench..." 
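
These runner scripts all share the same reporting flow: pytest writes one benchmark JSON per dataset, `scripts/util/converter.py` converts it, and `scripts/util/bench_csv_aggregator.py` collects everything into a single CSV. Run by hand for one benchmark/dataset pair, the flow is (benchmark and matrix names illustrative):

```bash
basedir=$(pwd)                                  # top of the sam/ repository
bench=mat_elemadd_FINAL
path=$basedir/suitesparse-bench/sam/$bench
mkdir -p $path

cd $basedir/sam/sim
pytest test/final-apps/test_$bench.py --ssname bcsstm02 -s --benchmark-json=$path/bcsstm02.json
python $basedir/scripts/util/converter.py --json_name $path/bcsstm02.json
python $basedir/scripts/util/bench_csv_aggregator.py $path $basedir/suitesparse-bench/sam/suitesparse_$bench.csv
```
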
pytest sam/sim/test/fusion-study/$bench.py --synth --check-gold --benchmark-json="$path/$bench.json" - python $cwd/scripts/converter.py --json_name $path/$bench.json - python $cwd/scripts/bench_csv_aggregator.py $path $cwd/SYNTH_OUT_FUSION.csv + python $basedir/scripts/util/converter.py --json_name $path/$bench.json + python $basedir/scripts/util/bench_csv_aggregator.py $path $basedir/SYNTH_OUT_FUSION.csv done diff --git a/scripts/run_sam_sim/sam_frostt_runner.sh b/scripts/run_sam_sim/sam_frostt_runner.sh new file mode 100755 index 00000000..59a9119f --- /dev/null +++ b/scripts/run_sam_sim/sam_frostt_runner.sh @@ -0,0 +1,84 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH --mem 120000 +#SBATCH -p lanka-v3 +#SBATCH --exclusive + +# Script steps +# 1. Formats data +# 2. Runs SAM sim in Pytest +# 3. Converts data to CSV +# 4. Aggregates CSV + +# ./scripts/run_sam_sim/sam_frostt_runner.sh + +set -u + +BENCHMARKS=( + tensor3_innerprod_FINAL + tensor3_elemadd_FINAL + tensor3_ttv_FINAL + tensor3_ttm_FINAL + tensor3_mttkrp_FINAL +) + +errors=() +RED='\033[0;31m' +NC='\033[0m' # No Color + +format_outdir=${FROSTT_FORMATTED_PATH} +basedir=$(pwd) +frosttpath=$FROSTT_PATH +benchout=frostt-bench/sam + +mkdir -p "$benchout" +mkdir -p $format_outdir +mkdir -p $TACO_TENSOR_PATH/other-formatted-taco + +make -j8 taco/build NEVA=$neva LANKA=$lanka GEN=ON + +for b in ${!BENCHMARKS[@]}; do + bench=${BENCHMARKS[$b]} + path=$basedir/$benchout/$bench + mkdir -p $basedir/$benchout/$bench + echo "Testing $bench..." + + while read line; do + name=$line + cd $format_outdir + + + if [ "$bench" == "tensor3_innerprod_FINAL" ]; then + echo "Generating input format files for $name..." + + $basedir/compiler/taco/build/bin/taco-test sam.pack_sss012 + $basedir/compiler/taco/build/bin/taco-test sam.pack_other_frostt + python $basedir/scripts/formatting/datastructure_tns.py -n $name -f sss012 + python $basedir/scripts/formatting/datastructure_tns.py -n $name -f sss012 --other + chmod -R 775 $FROSTT_FORMATTED_PATH + fi + + cd $basedir/sam/sim + + pytest test/final-apps/test_$bench.py --frosttname $name --benchmark-json=$path/$name.json + python $basedir/scripts/util/converter.py --json_name $path/$name.json + + status=$? 
+ if [ $status -gt 0 ] + then + errors+=("${name}, ${bench}") + fi + + cd $basedir + done <$1 + + python $basedir/scripts/util/bench_csv_aggregator.py $path $basedir/$benchout/frostt_$bench.csv + + echo -e "${RED}Failed tests:" + for i in ${!errors[@]}; do + error=${errors[$i]} + echo -e "${RED}$error," + done + echo -e "${NC}" +done + diff --git a/scripts/run_sam_sim/sam_suitesparse_runner.sh b/scripts/run_sam_sim/sam_suitesparse_runner.sh new file mode 100755 index 00000000..ff78826f --- /dev/null +++ b/scripts/run_sam_sim/sam_suitesparse_runner.sh @@ -0,0 +1,84 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH --mem 120000 +#SBATCH -p lanka-v3 +#SBATCH --exclusive + +# ./scripts/run_sam_sim/sam_suitesparse_runner.sh + +set -u + +BENCHMARKS=( + mat_vecmul_FINAL + matmul_FINAL + mat_elemadd_FINAL + mat_elemadd3_FINAL + mat_residual_FINAL + mat_mattransmul_FINAL +) + +errors=() +RED='\033[0;31m' +NC='\033[0m' # No Color + +format_outdir=${SUITESPARSE_FORMATTED_PATH} +basedir=$(pwd) +sspath=$SUITESPARSE_PATH +benchout=suitesparse-bench/sam + +mkdir -p "$benchout" +mkdir -p $format_outdir +mkdir -p $TACO_TENSOR_PATH/other-formatted-taco + +# make -j8 taco/build NEVA=$neva LANKA=$lanka GEN=ON +make -j8 taco/build GEN=ON + +for b in ${!BENCHMARKS[@]}; do + bench=${BENCHMARKS[$b]} + path=$basedir/$benchout/$bench + mkdir -p $basedir/$benchout/$bench + echo "Testing $bench..." + + while read line; do + cd $format_outdir + + if [ $2 -eq 1 ]; then + matrix="$sspath/$line/$line.mtx" + elif [ $2 -eq 2 ]; then + matrix="$sspath/$line.mtx" + else + matrix="$sspath/$line.mtx" + fi + + if [ "$bench" == "mat_vecmul_FINAL" ]; then + echo "Generating input format files for $line..." + SUITESPARSE_TENSOR_PATH=$matrix python $basedir/scripts/formatting/datastructure_suitesparse.py -n $line + + SUITESPARSE_TENSOR_PATH=$matrix $basedir/compiler/taco/build/bin/taco-test sam.pack_other_ss + python $basedir/scripts/formatting/datastructure_tns.py -n $line -f ss01 --other -ss + fi + + cd $basedir/sam/sim + + pytest test/final-apps/test_$bench.py --ssname $line -s --report-stats --benchmark-json=$path/$line.json + python $basedir/scripts/util/converter.py --json_name $path/$line.json + + status=$? 
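The machine flag ($2) in the runner above only changes how the SuiteSparse matrix path is built: the Lanka mirror nests each matrix in its own directory, while the other layouts are flat. The same selection logic in Python, with an illustrative sspath (the scripts take it from $SUITESPARSE_PATH):

```python
import os

def matrix_path(sspath, name, machine):
    if machine == 1:
        # Lanka layout: $sspath/$line/$line.mtx
        return os.path.join(sspath, name, name + ".mtx")
    # machine 2 and the local default share the flat layout: $sspath/$line.mtx
    return os.path.join(sspath, name + ".mtx")

print(matrix_path("/data/suitesparse", "bcsstm26", 1))  # /data/suitesparse/bcsstm26/bcsstm26.mtx
print(matrix_path("/data/suitesparse", "bcsstm26", 0))  # /data/suitesparse/bcsstm26.mtx
```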
+ if [ $status -gt 0 ] + then + errors+=("${line}, ${bench}") + fi + + cd $basedir + done <$1 + + python $basedir/scripts/util/bench_csv_aggregator.py $path $basedir/$benchout/suitesparse_$bench.csv + + echo -e "${RED}Failed tests:" + for i in ${!errors[@]}; do + error=${errors[$i]} + echo -e "${RED}$error," + done + echo -e "${NC}" +done + diff --git a/scripts/suitesparse_validator.sh b/scripts/run_sam_sim/suitesparse_validator.sh similarity index 81% rename from scripts/suitesparse_validator.sh rename to scripts/run_sam_sim/suitesparse_validator.sh index 8e554077..961dba79 100755 --- a/scripts/suitesparse_validator.sh +++ b/scripts/run_sam_sim/suitesparse_validator.sh @@ -3,6 +3,9 @@ #SBATCH --mem 120000 #SBATCH --exclusive +# ./scripts/run_sam_sim/suitesparse_validator.sh +# where out_path is optional + SAMNAME=( matmul_ikj vecmul_ij @@ -25,8 +28,13 @@ TACONAME=( set -u -sspath=/nobackup/owhsu/sparse-datasets/suitesparse -vout=/nobackup/owhsu/validate +sspath=$SUITESPARSE_PATH +if [ -z "$1" ] +then + vout=$basedir/validate/ +else + vout=$1 +fi mkdir -p "$vout" @@ -34,7 +42,7 @@ while read line; do matrix="$sspath/$line.mtx" # TACO - GEN=ON SUITESPARSE_TENSOR_PATH="$matrix" make -j8 validate-bench BENCHES="bench_suitesparse" VALIDATION_OUTPUT_PATH="$vout" NEVA=ON + GEN=ON SUITESPARSE_TENSOR_PATH="$matrix" make -j8 validate-bench BENCHES="bench_suitesparse" VALIDATION_OUTPUT_PATH="$vout" cd sam/sim # SAM diff --git a/scripts/sam_frostt_runner.sh b/scripts/sam_frostt_runner.sh deleted file mode 100644 index 2e48dc40..00000000 --- a/scripts/sam_frostt_runner.sh +++ /dev/null @@ -1,122 +0,0 @@ -#!/bin/bash -#SBATCH -N 1 -#SBATCH --mem 120000 -#SBATCH -p lanka-v3 -#SBATCH --exclusive - -set -u - -BENCHMARKS=( - tensor3_innerprod_FINAL - tensor3_elemadd_FINAL - tensor3_ttv_FINAL - tensor3_ttm_FINAL - tensor3_mttkrp_FINAL -) - -TENSORS=( - fb1k - fb10k - facebook - nell-2 - nell-1 -) - - -errors=() -RED='\033[0;31m' -NC='\033[0m' # No Color - -# LANKA -if [ $1 -eq 1 ]; then - export SUITESPARSE_PATH=/data/scratch/changwan/florida_all - export FROSTT_PATH=/data/scratch/owhsu/datasets/frostt - export TACO_TENSOR_PATH=/data/scratch/owhsu/datasets - export SUITESPARSE_FORMATTED_PATH=/data/scratch/owhsu/datasets/suitesparse-formatted - export FROSTT_FORMATTED_TACO_PATH=/data/scratch/owhsu/datasets/frostt-formatted/taco-tensor - export FROSTT_FORMATTED_PATH=/data/scratch/owhsu/datasets/frostt-formatted - - mkdir -p $TACO_TENSOR_PATH - mkdir -p $SUITESPARSE_FORMATTED_PATH - mkdir -p $FROSTT_FORMATTED_TACO_PATH - mkdir -p $FROSTT_FORMATTED_PATH - - lanka=ON - neva=OFF -elif [ $1 -eq 2 ]; then - lanka=OFF - neva=ON -else - lanka=OFF - neva=OFF -fi - -format_outdir=${FROSTT_FORMATTED_PATH} -basedir=$(pwd) -frosttpath=$FROSTT_PATH -benchout=frostt-bench/sam - -__conda_setup="$('/data/scratch/owhsu/miniconda/bin/conda' 'shell.bash' 'hook' 2> /dev/null)" -if [ $? -eq 0 ]; then - eval "$__conda_setup" -else - if [ -f "/data/scratch/owhsu/miniconda/etc/profile.d/conda.sh" ]; then - . "/data/scratch/owhsu/miniconda/etc/profile.d/conda.sh" - else - export PATH="/data/scratch/owhsu/miniconda/bin:$PATH" - fi -fi -unset __conda_setup -conda activate aha - -mkdir -p "$benchout" -mkdir -p $format_outdir -mkdir -p $TACO_TENSOR_PATH/other-formatted-taco - -make -j8 taco/build NEVA=$neva LANKA=$lanka GEN=ON - -for b in ${!BENCHMARKS[@]}; do - bench=${BENCHMARKS[$b]} - path=$basedir/$benchout/$bench - mkdir -p $basedir/$benchout/$bench - echo "Testing $bench..." 
- - for t in ${!TENSORS[@]}; do - name=${TENSORS[$t]} - cd $format_outdir - - - if [ "$bench" == "tensor3_innerprod_FINAL" ]; then - echo "Generating input format files for $name..." - - $basedir/compiler/taco/build/bin/taco-test sam.pack_sss012 - $basedir/compiler/taco/build/bin/taco-test sam.pack_other_frostt - python $basedir/scripts/datastructure_tns.py -n $name -f sss012 - python $basedir/scripts/datastructure_tns.py -n $name -f sss012 --other - chmod -R 775 $FROSTT_FORMATTED_PATH - fi - - cd $basedir/sam/sim - - pytest test/final-apps/test_$bench.py --frosttname $name --benchmark-json=$path/$name.json - python $basedir/scripts/converter.py --json_name $path/$name.json - - status=$? - if [ $status -gt 0 ] - then - errors+=("${name}, ${bench}") - fi - - cd $basedir - done - - python $basedir/scripts/bench_csv_aggregator.py $path $basedir/$benchout/frostt_$bench.csv - - echo -e "${RED}Failed tests:" - for i in ${!errors[@]}; do - error=${errors[$i]} - echo -e "${RED}$error," - done - echo -e "${NC}" -done - diff --git a/scripts/sam_suitesparse_runner.sh b/scripts/sam_suitesparse_runner.sh deleted file mode 100755 index 8c806381..00000000 --- a/scripts/sam_suitesparse_runner.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/bin/bash -#SBATCH -N 1 -#SBATCH --mem 120000 -#SBATCH -p lanka-v3 -#SBATCH --exclusive - -set -u - -BENCHMARKS=( -# mat_vecmul_FINAL - matmul_FINAL -# mat_elemadd_FINAL -# mat_elemadd3_FINAL -# mat_residual_FINAL -# mat_mattransmul_FINAL -) - -errors=() -RED='\033[0;31m' -NC='\033[0m' # No Color - -# LANKA -if [ $2 -eq 1 ]; then - export SUITESPARSE_PATH=/data/scratch/changwan/florida_all - export FROSTT_PATH=/data/scratch/owhsu/datasets/frostt - export TACO_TENSOR_PATH=/data/scratch/owhsu/datasets - export SUITESPARSE_FORMATTED_PATH=/data/scratch/owhsu/datasets/suitesparse-formatted - export FROSTT_FORMATTED_TACO_PATH=/data/scratch/owhsu/datasets/frostt-formatted/taco-tensor - export FROSTT_FORMATTED_PATH=/data/scratch/owhsu/datasets/frostt-formatted - - mkdir -p $TACO_TENSOR_PATH - mkdir -p $SUITESPARSE_FORMATTED_PATH - mkdir -p $FROSTT_FORMATTED_TACO_PATH - mkdir -p $FROSTT_FORMATTED_PATH - - lanka=ON - neva=OFF -elif [ $2 -eq 2 ]; then - lanka=OFF - neva=ON -else - lanka=OFF - neva=OFF -fi - -format_outdir=${SUITESPARSE_FORMATTED_PATH} -basedir=$(pwd) -sspath=$SUITESPARSE_PATH -benchout=suitesparse-bench/sam - -__conda_setup="$('/data/scratch/owhsu/miniconda/bin/conda' 'shell.bash' 'hook' 2> /dev/null)" -if [ $? -eq 0 ]; then - eval "$__conda_setup" -else - if [ -f "/data/scratch/owhsu/miniconda/etc/profile.d/conda.sh" ]; then - . "/data/scratch/owhsu/miniconda/etc/profile.d/conda.sh" - else - export PATH="/data/scratch/owhsu/miniconda/bin:$PATH" - fi -fi -unset __conda_setup -conda activate aha - -mkdir -p "$benchout" -mkdir -p $format_outdir -mkdir -p $TACO_TENSOR_PATH/other-formatted-taco - -make -j8 taco/build NEVA=$neva LANKA=$lanka GEN=ON - -for b in ${!BENCHMARKS[@]}; do - bench=${BENCHMARKS[$b]} - path=$basedir/$benchout/$bench - mkdir -p $basedir/$benchout/$bench - echo "Testing $bench..." - - while read line; do - cd $format_outdir - - if [ $2 -eq 1 ]; then - matrix="$sspath/$line/$line.mtx" - elif [ $2 -eq 2 ]; then - matrix="$sspath/$line.mtx" - else - matrix="$sspath/$line.mtx" - fi - - if [ "$bench" == "mat_vecmul_FINAL" ]; then - echo "Generating input format files for $line..." 
- SUITESPARSE_TENSOR_PATH=$matrix python $basedir/scripts/datastructure_suitesparse.py -n $line - - SUITESPARSE_TENSOR_PATH=$matrix $basedir/compiler/taco/build/bin/taco-test sam.pack_other_ss - python $basedir/scripts/datastructure_tns.py -n $line -f ss01 --other -ss - fi - - cd $basedir/sam/sim - - pytest test/final-apps/test_$bench.py --ssname $line -s --report-stats --benchmark-json=$path/$line.json - python $basedir/scripts/converter.py --json_name $path/$line.json - - status=$? - if [ $status -gt 0 ] - then - errors+=("${line}, ${bench}") - fi - - cd $basedir - done <$1 - - python $basedir/scripts/bench_csv_aggregator.py $path $basedir/$benchout/suitesparse_$bench.csv - - echo -e "${RED}Failed tests:" - for i in ${!errors[@]}; do - error=${errors[$i]} - echo -e "${RED}$error," - done - echo -e "${NC}" -done - diff --git a/scripts/sam_suitesparse_runner_sddmmonly.sh b/scripts/sam_suitesparse_runner_sddmmonly.sh deleted file mode 100644 index 419e17dd..00000000 --- a/scripts/sam_suitesparse_runner_sddmmonly.sh +++ /dev/null @@ -1,113 +0,0 @@ -#!/bin/bash -#SBATCH -N 1 -#SBATCH --mem 120000 -#SBATCH -p lanka-v3 -#SBATCH --exclusive - -set -u - -BENCHMARKS=( - mat_sddmm_FINAL -) - -errors=() -RED='\033[0;31m' -NC='\033[0m' # No Color - -# LANKA -if [ $2 -eq 1 ]; then - export SUITESPARSE_PATH=/data/scratch/strange/tamu_sparse_unpacked - export FROSTT_PATH=/data/scratch/owhsu/datasets/frostt - export TACO_TENSOR_PATH=/data/scratch/owhsu/datasets - export SUITESPARSE_FORMATTED_PATH=/data/scratch/owhsu/datasets/suitesparse-formatted - export FROSTT_FORMATTED_TACO_PATH=/data/scratch/owhsu/datasets/frostt-formatted/taco-tensor - export FROSTT_FORMATTED_PATH=/data/scratch/owhsu/datasets/frostt-formatted - - mkdir -p $TACO_TENSOR_PATH - mkdir -p $SUITESPARSE_FORMATTED_PATH - mkdir -p $FROSTT_FORMATTED_TACO_PATH - mkdir -p $FROSTT_FORMATTED_PATH - - lanka=ON - neva=OFF -elif [ $2 -eq 2 ]; then - lanka=OFF - neva=ON -else - lanka=OFF - neva=OFF -fi - -format_outdir=${SUITESPARSE_FORMATTED_PATH} -basedir=$(pwd) -sspath=$SUITESPARSE_PATH -benchout=suitesparse-bench/sam - -__conda_setup="$('/data/scratch/owhsu/miniconda/bin/conda' 'shell.bash' 'hook' 2> /dev/null)" -if [ $? -eq 0 ]; then - eval "$__conda_setup" -else - if [ -f "/data/scratch/owhsu/miniconda/etc/profile.d/conda.sh" ]; then - . "/data/scratch/owhsu/miniconda/etc/profile.d/conda.sh" - else - export PATH="/data/scratch/owhsu/miniconda/bin:$PATH" - fi -fi -unset __conda_setup -conda activate aha - -mkdir -p "$benchout" -mkdir -p $format_outdir -mkdir -p $TACO_TENSOR_PATH/other-formatted-taco - -make -j8 taco/build NEVA=$neva LANKA=$lanka GEN=ON - -for b in ${!BENCHMARKS[@]}; do - bench=${BENCHMARKS[$b]} - path=$basedir/$benchout/$bench - mkdir -p $basedir/$benchout/$bench - echo "Testing $bench..." - - while read line; do -# cd $format_outdir - - if [ $2 -eq 1 ]; then - matrix="$sspath/$line/$line.mtx" - elif [ $2 -eq 2 ]; then - matrix="$sspath/$line.mtx" - else - matrix="$sspath/$line.mtx" - fi - -# if [ "$bench" == "mat_vecmul_FINAL" ]; then -# echo "Generating input format files for $line..." 
-# SUITESPARSE_TENSOR_PATH=$matrix python $basedir/scripts/datastructure_suitesparse.py -n $line --o -# -# SUITESPARSE_TENSOR_PATH=$matrix $basedir/compiler/taco/build/bin/taco-test sam.pack_other_ss -# python $basedir/scripts/datastructure_tns.py -n $line -f ss01 --other -ss -# fi - - cd $basedir/sam/sim - - pytest test/final-apps/test_$bench.py --ssname $line -s --benchmark-json=$path/$line.json - python $basedir/scripts/converter.py --json_name $path/$line.json - - status=$? - if [ $status -gt 0 ] - then - errors+=("${line}, ${bench}") - fi - - cd $basedir - done <$1 - - python $basedir/scripts/bench_csv_aggregator.py $path $basedir/$benchout/suitesparse_$bench.csv - - echo -e "${RED}Failed tests:" - for i in ${!errors[@]}; do - error=${errors[$i]} - echo -e "${RED}$error," - done - echo -e "${NC}" -done - diff --git a/scripts/ss_stats.sh b/scripts/ss_stats.sh deleted file mode 100755 index 6b8c3afe..00000000 --- a/scripts/ss_stats.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh -#SBATCH -N 1 -#SBATCH --exclusive -/home/owhsu/anaconda3/condabin/conda init bash -/home/owhsu/anaconda3/condabin/conda activate aha - -rm -rf /home/owhsu/aha/scadi_graph/scripts/logs - -python suitesparse_stats.py --overall -nstop 250 diff --git a/scripts/stats/README.md b/scripts/stats/README.md new file mode 100644 index 00000000..dd1797d3 --- /dev/null +++ b/scripts/stats/README.md @@ -0,0 +1,8 @@ +# Statistics Scripts + +The `scripts/stats/` folder contains scripts used to get general sparse statistics about the datasets. This is useful for informing the designs. + +1. `get_tensor_arrlen.py` - Script that gets the length of the datastructure arrays from the input datasets (to populate CSVs). +2. `suitesparse_stats.sh` - Script that calls `suitesparse_stats.py`. +3. `suitesparse_stats.py` - File that calculates certain statistics (e.g., size, + len, nnz) of the SuiteSparse data structure arrays (e.g.
seg/crd) diff --git a/scripts/get_tensor_arrlen.py b/scripts/stats/get_tensor_arrlen.py similarity index 92% rename from scripts/get_tensor_arrlen.py rename to scripts/stats/get_tensor_arrlen.py index f914198d..0d2df62d 100644 --- a/scripts/get_tensor_arrlen.py +++ b/scripts/stats/get_tensor_arrlen.py @@ -1,8 +1,11 @@ +# python scripts/stats/get_tensor_arrlen.py + import argparse import os import csv + +# This is using the old CSF file types def write_csv(path, outpath): with open(outpath, 'w+', newline='') as outcsv: writer = csv.writer(outcsv) diff --git a/scripts/suitesparse_stats.py b/scripts/stats/suitesparse_stats.py similarity index 98% rename from scripts/suitesparse_stats.py rename to scripts/stats/suitesparse_stats.py index 41947ade..bc4b49a0 100644 --- a/scripts/suitesparse_stats.py +++ b/scripts/stats/suitesparse_stats.py @@ -8,7 +8,7 @@ from pathlib import Path -from util import TensorCollectionSuiteSparse, ScipyTensorShifter, \ +from sam.util import TensorCollectionSuiteSparse, ScipyTensorShifter, \ ScipyMatrixMarketTensorLoader, SuiteSparseTensor, safeCastPydataTensorToInts SS_PATH = os.getenv('SUITESPARSE_PATH') diff --git a/scripts/stats/suitesparse_stats.sh b/scripts/stats/suitesparse_stats.sh new file mode 100755 index 00000000..6b3760a0 --- /dev/null +++ b/scripts/stats/suitesparse_stats.sh @@ -0,0 +1,9 @@ +#!/bin/sh +#SBATCH -N 1 +#SBATCH --exclusive + +basedir=$(pwd) + +rm -rf $basedir/scripts/logs + +python suitesparse_stats.py --overall -nstop 250 diff --git a/scripts/suitesparse_memory_model_runner.sh b/scripts/suitesparse_memory_model_runner.sh new file mode 100755 index 00000000..c79e5a81 --- /dev/null +++ b/scripts/suitesparse_memory_model_runner.sh @@ -0,0 +1,44 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH --mem 120000 +#SBATCH -p lanka-v3 +#SBATCH --exclusive + +benchout=memory_model_out + +basedir=$(pwd) +# bench=matmul_ijk_tile_pipeline_final +yaml_fname=memory_config_onyx.yaml +path=$basedir/$benchout + +fname=$1 + +appname=$2 + +echo "Running for suitesparse $fname" + +export SAM_HOME=$basedir +# export TILED_SUITESPARSE_FORMATTED_PATH=${SAM_HOME}/tiles/matmul_ijk/formatted +# export TILED_OUTPUT_PATH=${SAM_HOME}/tiles/matmul_ijk/output/ + +export TILED_SUITESPARSE_FORMATTED_PATH=${SAM_HOME}/tiles/${appname}/formatted +export TILED_OUTPUT_PATH=${SAM_HOME}/tiles/${appname}/output/ + +pushd . + +mkdir -p $path + +mkdir -p $basedir/tiles/ +rm -rf $basedir/tiles/* + +./scripts/tiling/prepare_files.sh $fname.mtx $yaml_fname $fname $appname + +cd $basedir/sam/sim +# python3 -m pytest test/advanced-simulator/test_$bench.py --ssname $fname -s --check-gold --skip-empty --nbuffer --yaml_name=$yaml_fname --benchmark-json=$path/mem_model_$fname.json +# pytest test/advanced-simulator/test_$bench.py --ssname $fname -s --check-gold --skip-empty --nbuffer --yaml_name=$yaml_fname --benchmark-json=$path/mem_model_$fname.json + +# python3 $basedir/scripts/converter.py --json_name $path/mem_model_$fname.json + +python3 $basedir/scripts/util/bench_csv_aggregator.py $path $basedir/$benchout/${appname}.csv + +popd diff --git a/scripts/tensor_names/README.md b/scripts/tensor_names/README.md new file mode 100644 index 00000000..8cf69143 --- /dev/null +++ b/scripts/tensor_names/README.md @@ -0,0 +1,19 @@ +# Tensor Names + +The `scripts/tensor_names` folder contains text files with lists of SuiteSparse matrix +names that are used for different scenarios. + +The files in this folder are: +1. `suitesparse.txt` - All (ANY type) suitesparse matrices (alphabetical order) +2.
`suitesparse_ci.txt` - All suitesparse matrices that are in the `sam/data/` folder in the SAM repo for CI purposes +3. `suitesparse_real.txt` - All real suitesparse matrices (alphabetical order) +4. `suitesparse_valid.txt` - All REAL and INTEGER suitesparse matrices that fit + in memory on LANKA (MIT) and for which at least ONE test from the original SAM paper + passed (unordered) +5. `suitesparse_valid_all.txt` - All real and integer suitesparse matrices that + fit in memory on LANKA (MIT) and for which ALL tests from the original SAM paper + passed (ordered by dense dimension) +6. `suitesparse_valid_large50.txt` - The largest 50 (by dense dimension) suitesparse matrices that passed ALL tests from the original SAM paper +7. `suitesparse_valid_mid50.txt` - The median 50 (by dense dimension) suitesparse matrices that passed ALL tests from the original SAM paper +8. `suitesparse_valid_small50.txt` - The smallest 50 (by dense dimension) suitesparse matrices that passed ALL tests from the original SAM paper +9. `temp_*.txt` - `suitesparse_valid.txt` split into various files for running tests in parallel diff --git a/scripts/divvy_runs.py b/scripts/tensor_names/divvy_runs.py similarity index 100% rename from scripts/divvy_runs.py rename to scripts/tensor_names/divvy_runs.py diff --git a/scripts/tensor_names/spmv_iter_matrices.txt b/scripts/tensor_names/spmv_iter_matrices.txt new file mode 100644 index 00000000..f113329a --- /dev/null +++ b/scripts/tensor_names/spmv_iter_matrices.txt @@ -0,0 +1,9 @@ +bcsstm26 +tols2000 +west2021 +adder_dcop_30 +adder_trans_02 +watt_2 +rajat12 +G42 +G30 diff --git a/scripts/tile_ext.sh b/scripts/tile_ext.sh deleted file mode 100755 index 2c8a54e3..00000000 --- a/scripts/tile_ext.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -BENCHMARKS=( - matmul_ikj -) - -# THIS FILE MUST BE RUN FROM sam/ location -sspath=$SUITESPARSE_PATH - -basedir=$(pwd) - -ext_path=$basedir/extensor_mtx/$1 - -echo "$ext_path" - -for b in ${!BENCHMARKS[@]}; do - bench=${BENCHMARKS[$b]} - path=$basedir/$benchout/$bench - mkdir -p $basedir/$benchout/$bench - echo "Testing $bench..." - - rm -rf $basedir/tiles/* - - echo "Tiling mtx file" - python $basedir/sam/sim/src/tiling/tile.py --extensor --input_path $ext_path --cotile $bench --multilevel --hw_config $basedir/sam/sim/src/tiling/$2 - - echo "Generating input format files for $ext_path..." - python $basedir/scripts/datastructure_suitesparse.py -n temp -hw -b $bench --input $basedir/tiles/$bench/mtx/ --output_dir_path $basedir/tiles/$bench/formatted --tiles - -done - diff --git a/scripts/tiling/README.md b/scripts/tiling/README.md new file mode 100644 index 00000000..47be7b1e --- /dev/null +++ b/scripts/tiling/README.md @@ -0,0 +1,33 @@ +# Tiling Scripts + +The `scripts/tiling/` folder contains scripts used to tile datasets and run tiling benchmarks. + +1. `advanced_simulator_runner.sh` - Script that formats, runs, and generates a + CSV for the tiled simulation (aka advanced simulator). +2. `clean_memory_model.sh` - Helper script to remove all generated files from + the tiled SAM flow. +3. `ext_runner.sh` - Script that runs the Extensor configuration for + inner-product matmul, used to recreate a graph in the ASPLOS 2023 SAM paper. +4. `few_points_memory_model_runner.sh` - Script that runs a restricted set of + experiments (8 points) from Figure 15 on pg. 12 of the SAM ASPLOS 2023 paper +(used in the ASPLOS 2023 artifact evaluation). +5. `full_ext_runner.sh` - Script that runs `ext_runner.sh` for all combinations + of NNZ and Dimension points. +6.
`full_memory_model_runner.sh` - Script that runs the full set of experiments + to generate Figure 15 on pg. 12 of the SAM ASPLOS 2023 paper (used in the +ASPLOS 2023 artifact evaluation). +7. `generate_gold_matmul_tiled.py` - Script that generates the golden matmul + partial sums for each tile. +8. `generate_sparsity_sweep_mem_model.sh` - Script that generates pre-tiled + synthetic matrices. Used in the ASPLOS 2023 SAM artifact evaluation. +9. `prepare_files_no_gold.sh` - Script that runs `tile_ext.sh` for the extensor + configuration. +10. `prepare_files.sh` - Script that runs `tile_ext.sh` and also prepares the + gold files using `generate_gold_matmul_tiled.py`. +11. `single_point_memory_model_runner.sh` - Script that runs a single point + from Figure 15 on pg. 12 of the SAM ASPLOS 2023 paper (used in the ASPLOS +2023 artifact evaluation). +12. `tile_ext.sh` - Script that tiles the input matrices from a directory (like + extensor_mtx). +13. `tile.sh` - Script that tiles the input matrices from a tensor name (like + SuiteSparse matrices). diff --git a/scripts/tiling/advanced_simulator_runner.sh b/scripts/tiling/advanced_simulator_runner.sh new file mode 100755 index 00000000..2dd6edeb --- /dev/null +++ b/scripts/tiling/advanced_simulator_runner.sh @@ -0,0 +1,100 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH --mem 120000 +#SBATCH -p lanka-v3 +#SBATCH --exclusive + +# ./scripts/tiling/advanced_simulator_runner.sh <tensor_name_file> <machine> <yaml_name> +# where machine is either 0(local), 1(Lanka), or 2(Kiwi/Neva/Lagos) + +set -u + +BENCHMARKS=( +# mat_vecmul_FINAL +# matmul_FINAL +# mat_identity +# mat_identity_back +# matmul_ikj_memory_back +# matmul_ikj_sparse_tiling2 +# matmul_ikj_glb_tile +# matmul_ikj_glb_tile2 +matmul_ikj_tile_pipeline_final +# matmul_ikj_glb_tile_pipeline +# matmul_ikj_glb_no_pipe +# matmul_ikj_input_only +# matmul_ikj_tiled_bcsstm02 +# matmul_ikj_check +# matmul_ikj_tiling +# matmul_ikj_back +# mat_elemmul_FINAL +# mat_elemadd_FINAL +# mat_elemadd3_FINAL +# mat_residual_FINAL +# mat_mattransmul_FINAL +) + +errors=() +RED='\033[0;31m' +NC='\033[0m' # No Color + +basedir=$(pwd) + +sspath=$SUITESPARSE_PATH +benchout=suitesparse-bench_simulator/sam +format_outdir=${SUITESPARSE_FORMATTED_PATH} + +source $basedir/../venv/bin/activate + +mkdir -p "$benchout" +mkdir -p $format_outdir +mkdir -p $TACO_TENSOR_PATH/other-formatted-taco + +for b in ${!BENCHMARKS[@]}; do + bench=${BENCHMARKS[$b]} + path=$basedir/$benchout/$bench + mkdir -p $basedir/$benchout/$bench + echo "Testing $bench..." + + while read line; do + cd $format_outdir + + if [ $2 -eq 1 ]; then + matrix="$sspath/$line/$line.mtx" + elif [ $2 -eq 2 ]; then + matrix="$sspath/$line.mtx" + else + matrix="$sspath/$line.mtx" + fi + + if [ "$bench" == "matmul_ikj" ]; then + echo "Generating input format files for $line..."
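The tiled runners and gold generators in this diff all derive the number of top-level tiles per dimension from the Glb_tile_size and Mem_tile_size entries of the memory-config YAML. A small sketch of that arithmetic; the config values below are illustrative, not the shipped memory_config_onyx.yaml:

```python
# A dimension of size n is covered by chunks of Glb_tile_size * Mem_tile_size
# elements, so the gold generators compute 1 + n // (glb * mem) top-level tiles.
import yaml

def top_level_tiles(n, glb_tile_size, mem_tile_size):
    return 1 + n // (glb_tile_size * mem_tile_size)

loop_config = yaml.safe_load("Glb_tile_size: 4\nMem_tile_size: 30\n")
print(top_level_tiles(1000, loop_config["Glb_tile_size"],
                      loop_config["Mem_tile_size"]))  # 1 + 1000 // 120 = 9
```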
+ SUITESPARSE_TENSOR_PATH=$matrix python $basedir/scripts/formatting/datastructure_suitesparse.py -n $line + + SUITESPARSE_TENSOR_PATH=$matrix $basedir/compiler/taco/build/bin/taco-test sam.pack_other_ss + python $basedir/scripts/formatting/datastructure_frostt.py -n $line -f ss01 --other -ss + fi + + cd $basedir/sam/sim + #python -m cProfile -o test/final-apps/test_$bench.py --ssname $line -s --benchmark-json=$path/$line.json + pytest test/advanced-simulator/test_$bench.py --ssname $line -s --report-stats --check-gold --skip-empty --nbuffer --yaml_name=$3 --benchmark-json=$path/$line.json + # pytest test/advanced-simulator/test_$bench.py --ssname $line -s --report-stats --back --depth=1 --debug-sim --check-gold --benchmark-json=$path/$line.json + # python $basedir/scripts/util/converter.py --json_name $path/$line.json + + status=$? + if [ $status -gt 0 ] + then + errors+=("${line}, ${bench}") + fi + + cd $basedir + done <$1 + + python $basedir/scripts/util/bench_csv_aggregator.py $path $basedir/$benchout/suitesparse_$bench.csv + + echo -e "${RED}Failed tests:" + for i in ${!errors[@]}; do + error=${errors[$i]} + echo -e "${RED}$error," + done + echo -e "${NC}" +done diff --git a/scripts/clean_memory_model.sh b/scripts/tiling/clean_memory_model.sh old mode 100644 new mode 100755 similarity index 100% rename from scripts/clean_memory_model.sh rename to scripts/tiling/clean_memory_model.sh diff --git a/scripts/tiling/ext_runner.sh b/scripts/tiling/ext_runner.sh new file mode 100755 index 00000000..4965ace6 --- /dev/null +++ b/scripts/tiling/ext_runner.sh @@ -0,0 +1,16 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH --mem 120000 +#SBATCH -p lanka-v3 +#SBATCH --exclusive + +# ./scripts/tiling/ext_runner.sh extensor__.mtx + +basedir=$(pwd) +rm -rf $basedir/tiles/* + +./scripts/tiling/tile_ext.sh $1 memory_config_extensor_17M_llb.yaml + +python scripts/tiling/generate_gold_matmul_tiled.py --yaml_name memory_config_extensor_17M_llb.yaml + +./scripts/tiling/advanced_simulator_runner.sh scripts/temp.txt 2 memory_config_extensor_17M_llb.yaml diff --git a/scripts/few_points_memory_model_runner.sh b/scripts/tiling/few_points_memory_model_runner.sh similarity index 63% rename from scripts/few_points_memory_model_runner.sh rename to scripts/tiling/few_points_memory_model_runner.sh index 79811385..5d36eae2 100755 --- a/scripts/few_points_memory_model_runner.sh +++ b/scripts/tiling/few_points_memory_model_runner.sh @@ -4,6 +4,9 @@ #SBATCH -p lanka-v3 #SBATCH --exclusive +# ./few_points_memory_model_runner.sh +# where gold is 0(no gold check) or 1(with gold check) + SECONDS=0 set -u @@ -49,9 +52,9 @@ for b in ${!BENCHMARKS[@]}; do for nnz in ${!NNZ[@]}; do for dim in ${!DIMENSIONS[@]}; do if [ $2 -eq 0 ]; then - ./scripts/prepare_files_no_gold.sh extensor_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.mtx + ./scripts/tiling/prepare_files_no_gold.sh extensor_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.mtx elif [ $2 -eq 1 ]; then - ./scripts/prepare_files.sh extensor_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.mtx + ./scripts/tiling/prepare_files.sh extensor_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.mtx fi bench=${BENCHMARKS[$b]} path=$basedir/$benchout @@ -61,11 +64,11 @@ for b in ${!BENCHMARKS[@]}; do line=random_sparsity cd $basedir/sam/sim if [ $2 -eq 1 ]; then - pytest test/advanced-simulator/test_$bench.py --ssname $line -s --check-gold --skip-empty --nbuffer --yaml_name=$1 --nnz-value=${NNZ[$nnz]} --benchmark-json=$path/${line}_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.json + pytest test/advanced-simulator/test_$bench.py --ssname $line -s --check-gold 
--skip-empty --nbuffer --memory-model --yaml_name=$1 --nnz-value=${NNZ[$nnz]} --benchmark-json=$path/${line}_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.json else - pytest test/advanced-simulator/test_$bench.py --ssname $line -s --skip-empty --nbuffer --yaml_name=$1 --nnz-value=${NNZ[$nnz]} --benchmark-json=$path/${line}_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.json + pytest test/advanced-simulator/test_$bench.py --ssname $line -s --skip-empty --nbuffer --memory-model --yaml_name=$1 --nnz-value=${NNZ[$nnz]} --benchmark-json=$path/${line}_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.json fi - python $basedir/scripts/converter.py --json_name $path/${line}_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.json + python $basedir/scripts/util/converter.py --json_name $path/${line}_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.json status=$? if [ $status -gt 0 ] @@ -75,7 +78,7 @@ for b in ${!BENCHMARKS[@]}; do cd $basedir done done - python3 $basedir/scripts/bench_csv_aggregator.py $path $basedir/$benchout/$bench.csv + python3 $basedir/scripts/util/bench_csv_aggregator.py $path $basedir/$benchout/$bench.csv echo -e "${RED}Failed tests:" for i in ${!errors[@]}; do diff --git a/scripts/full_ext_runner.sh b/scripts/tiling/full_ext_runner.sh similarity index 72% rename from scripts/full_ext_runner.sh rename to scripts/tiling/full_ext_runner.sh index aca5dd2e..411b2ecd 100755 --- a/scripts/full_ext_runner.sh +++ b/scripts/tiling/full_ext_runner.sh @@ -3,7 +3,8 @@ #SBATCH --mem 120000 #SBATCH -p lanka-v3 #SBATCH --exclusive -#SBATCH --mail-user=oliviahsu1107@gmail.com + +# ./scripts/tiling/full_ext_runner.sh NNZ=( 5000 @@ -27,6 +28,6 @@ DIMENSIONS=( for nnz in ${!NNZ[@]}; do for dim in ${!DIMENSIONS[@]}; do filename=${NNZ[$nnz]}_${DIMENSIONS[$dim]} - ./scripts/ext_runner.sh extensor_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.mtx + ./scripts/tiling/ext_runner.sh extensor_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.mtx done done diff --git a/scripts/full_memory_model_runner.sh b/scripts/tiling/full_memory_model_runner.sh similarity index 63% rename from scripts/full_memory_model_runner.sh rename to scripts/tiling/full_memory_model_runner.sh index 447b7bc4..d785a14c 100755 --- a/scripts/full_memory_model_runner.sh +++ b/scripts/tiling/full_memory_model_runner.sh @@ -3,6 +3,10 @@ #SBATCH --mem 120000 #SBATCH -p lanka-v3 #SBATCH --exclusive + +# full_memory_model_runner.sh +# where gold is 0 (no gold check) or 1 (with gold check) + SECONDS=0 set -u @@ -49,9 +53,9 @@ for b in ${!BENCHMARKS[@]}; do for nnz in ${!NNZ[@]}; do for dim in ${!DIMENSIONS[@]}; do if [ $2 -eq 1 ]; then - ./scripts/prepare_files.sh extensor_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.mtx + ./scripts/tiling/prepare_files.sh extensor_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.mtx elif [ $2 -eq 0 ]; then - ./scripts/prepare_files_no_gold.sh extensor_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.mtx + ./scripts/tiling/prepare_files_no_gold.sh extensor_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.mtx fi bench=${BENCHMARKS[$b]} path=$basedir/$benchout @@ -61,11 +65,11 @@ for b in ${!BENCHMARKS[@]}; do line=random_sparsity cd $basedir/sam/sim if [ $2 -eq 1 ]; then - pytest test/advanced-simulator/test_$bench.py --ssname $line -s --check-gold --skip-empty --nbuffer --yaml_name=$1 --nnz-value=${NNZ[$nnz]} --benchmark-json=$path/${line}_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.json + pytest test/advanced-simulator/test_$bench.py --ssname $line -s --check-gold --skip-empty --nbuffer --memory-model --yaml_name=$1 --nnz-value=${NNZ[$nnz]} --benchmark-json=$path/${line}_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.json else - pytest test/advanced-simulator/test_$bench.py --ssname $line -s 
--skip-empty --nbuffer --yaml_name=$1 --nnz-value=${NNZ[$nnz]} --benchmark-json=$path/${line}_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.json + pytest test/advanced-simulator/test_$bench.py --ssname $line -s --skip-empty --nbuffer --memory-model --yaml_name=$1 --nnz-value=${NNZ[$nnz]} --benchmark-json=$path/${line}_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.json fi - python $basedir/scripts/converter.py --json_name $path/${line}_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.json + python $basedir/scripts/util/converter.py --json_name $path/${line}_${NNZ[$nnz]}_${DIMENSIONS[$dim]}.json status=$? if [ $status -gt 0 ] @@ -75,7 +79,7 @@ for b in ${!BENCHMARKS[@]}; do cd $basedir done done - python3 $basedir/scripts/bench_csv_aggregator.py $path $basedir/$benchout/$bench.csv + python3 $basedir/scripts/util/bench_csv_aggregator.py $path $basedir/$benchout/$bench.csv echo -e "${RED}Failed tests:" for i in ${!errors[@]}; do diff --git a/scripts/generate_gold_matmul_tiled.py b/scripts/tiling/generate_gold_matmul_tiled.py similarity index 81% rename from scripts/generate_gold_matmul_tiled.py rename to scripts/tiling/generate_gold_matmul_tiled.py index 64ff4946..4b585a19 100644 --- a/scripts/generate_gold_matmul_tiled.py +++ b/scripts/tiling/generate_gold_matmul_tiled.py @@ -9,23 +9,13 @@ import argparse from pathlib import Path +from scripts.util.util import round_sparse - -# FIXME: (owhsu) this should be imported from util -def round_sparse(x): - if 0.0 <= x < 1: - return 1 - elif 0.0 > x > -1: - return -1 - elif x >= 0.0: - return math.floor(x + 0.5) - else: - return math.ceil(x - 0.5) - +app_name = "mat_mattransmul" def generate_gold_matmul_tiled(tile_crd_b, tile_crd_c, dirname, out_format="ss01"): # CSR - formatted_dir = "./tiles/matmul_ikj/mtx" + formatted_dir = f"./tiles/{app_name}/mtx" B_dir = "tensor_B_tile_" for a in tile_crd_b: B_dir += str(a) + "_" @@ -69,6 +59,7 @@ def generate_gold_matmul_tiled(tile_crd_b, tile_crd_c, dirname, out_format="ss01 itr += 1 C_scipy = C_scipy.tocsc() gold_nd = (B_scipy @ C_scipy) + # gold_nd = B_scipy.dot(C_scipy) gold_out = gold_nd.tocoo() assert tile_crd_b[1] == tile_crd_c[0] and tile_crd_b[3] == tile_crd_c[2] scipy.io.mmwrite( @@ -78,16 +69,11 @@ def generate_gold_matmul_tiled(tile_crd_b, tile_crd_c, dirname, out_format="ss01 if __name__ == "__main__": parser = argparse.ArgumentParser(description="Generate tiled output gold") - parser.add_argument("--yaml_name", type=str, default="memory_config_real.yaml") + parser.add_argument("--yaml_name", type=str, default="memory_config_onyx.yaml") args = parser.parse_args() - outdir = "./tiles/matmul_ikj/output/" + outdir = f"./tiles/{app_name}/output/" outpath = Path(outdir) - outpath.mkdir(parents=True, exist_ok=True) - - # generate_gold_matmul_tiled([0, 1, 2, 9], [1, 0, 9, 0], outdir) - - # generate_gold_matmul_tiled([0, 1, 0, 7], [1, 0, 7, 0], outdir) - # quit() with open("/nobackup/rsharma3/Sparsity/simulator/old_sam/sam/tiles/matmul_ikj/tensor_sizes", "rb") as ff: + outpath.mkdir(parents=True) with open("./tiles/matmul_ikj/tensor_sizes", "rb") as ff: sizes_dict_level_full = pickle.load(ff) @@ -95,10 +81,11 @@ def generate_gold_matmul_tiled(tile_crd_b, tile_crd_c, dirname, out_format="ss01 with open("./sam/sim/src/tiling/" + args.yaml_name, "r") as stream: loop_config = yaml.safe_load(stream) + print("sizes_dict_level_full", sizes_dict_level_full) struct = { "i00": 1 + int(sizes_dict_level_full["B"][0]) // (loop_config["Glb_tile_size"] * loop_config["Mem_tile_size"]), - "k00": 1 + int(sizes_dict_level_full["B"][1]) // 
(loop_config["Glb_tile_size"] * loop_config["Mem_tile_size"]), - "j00": 1 + int(sizes_dict_level_full["C"][1]) // (loop_config["Glb_tile_size"] * loop_config["Mem_tile_size"]), + "k00": 1 + int(sizes_dict_level_full["c"][0]) // (loop_config["Glb_tile_size"] * loop_config["Mem_tile_size"]), + "j00": 1 + int(sizes_dict_level_full["d"][0]) // (loop_config["Glb_tile_size"] * loop_config["Mem_tile_size"]), "i0": loop_config["Glb_tile_size"], "k0": loop_config["Glb_tile_size"], "j0": loop_config["Glb_tile_size"]} print(struct) # quit() diff --git a/scripts/tiling/generate_gold_mattransmul.py b/scripts/tiling/generate_gold_mattransmul.py new file mode 100644 index 00000000..b044a949 --- /dev/null +++ b/scripts/tiling/generate_gold_mattransmul.py @@ -0,0 +1,174 @@ +import scipy +import scipy.sparse +import os +import scipy.io +import numpy as np +import yaml +import math +import pickle +import argparse + +from pathlib import Path +from scripts.util.util import round_sparse + +def generate_gold_mattransmul_tiled(tile_crd_b, tile_crd_c, tile_crd_d, dirname, out_format="ss01"): + # CSR + formatted_dir = f"./tiles/mat_mattransmul/mtx" + + B_dir = "tensor_B_tile_" + for a in tile_crd_b: + B_dir += str(a) + "_" + C_dir = "tensor_c_tile_" + for a in tile_crd_c: + C_dir += str(a) + "_" + d_dir = "tensor_d_tile_" + for a in tile_crd_d: + d_dir += str(a) + "_" + + B_dir = B_dir[0:-1] + ".mtx" + C_dir = C_dir[0:-1] + ".mtx" + d_dir = d_dir[0:-1] + ".mtx" + # print(B_dir, " ", C_dir) + B_filename = os.path.join(formatted_dir, B_dir) + C_filename = os.path.join(formatted_dir, C_dir) + d_filename = os.path.join(formatted_dir, d_dir) + # print() + # print(B_filename) + # print(C_filename) + # print(d_filename) + # print() + if os.path.exists(B_filename) and os.path.exists(C_filename) and os.path.exists(d_filename): + B_scipy = scipy.io.mmread(B_filename) + itr = 0 + # print("\nB_scipy: ", B_scipy) + for i, j, v in zip(B_scipy.row, B_scipy.col, B_scipy.data): + # print(B_scipy.data) + # print(i, " ", j, " ", v) + B_scipy.data[itr] = round_sparse(B_scipy.data[itr]) + # if B_scipy.data[itr] < 1 and B_scipy.data[itr] > 0: + # B_scipy.data[itr] = 1 + # elif B_scipy.data[itr] < 0 and B_scipy.data[itr] > -1: + # B_scipy.data[itr] = -1 + # else: + # B_scipy.data[itr] = int(B_scipy.data[itr]) + itr += 1 + B_scipy = B_scipy.tocsr() + + + C_scipy = scipy.io.mmread(C_filename) + # print(C_filename) + # print("\nC_scipy: ", C_scipy) + # print("___________________") + # print(B_scipy) + itr = 0 + for i, j, v in zip(C_scipy.row, C_scipy.col, C_scipy.data): + C_scipy.data[itr] = round_sparse(C_scipy.data[itr]) + itr += 1 + C_scipy = C_scipy.tocsr() + C_scipy = np.transpose(C_scipy) + + d_scipy = scipy.io.mmread(d_filename) + # print("\nd_scipy: ", d_scipy) + + itr = 0 + for i, j, v in zip(d_scipy.row, d_scipy.col, d_scipy.data): + d_scipy.data[itr] = round_sparse(d_scipy.data[itr]) + + itr += 1 + d_scipy = d_scipy.tocsr() + d_scipy = np.transpose(d_scipy) + + # gold_nd = (B_scipy @ C_scipy) + # gold_nd = B_scipy.dot(C_scipy) + + #constants + alpha = 2 + beta = 2 + + print("B_scipy.shape: ", B_scipy.shape) + print("C_scipy.shape: ", C_scipy.shape) + print("d_scipy.shape: ", d_scipy.shape) + + gold_nd = alpha*(B_scipy @ C_scipy) + beta * d_scipy + # print(gold_nd) + + gold_out = gold_nd.tocoo() + assert tile_crd_b[1] == tile_crd_c[0] and tile_crd_b[3] == tile_crd_c[1] and tile_crd_b[0] == tile_crd_d[0] and tile_crd_b[2] == tile_crd_d[1] + # assert tile_crd_b[1] == tile_crd_c[0] and tile_crd_b[3] == tile_crd_c[2] + 
scipy.io.mmwrite( + dirname + "out_" + str(tile_crd_b[0]) + "_" + str(tile_crd_b[1]) + "_" + str(tile_crd_b[3]) + "_" + str(tile_crd_b[2]) + "_" + str( + tile_crd_c[0]) + "_" + str(tile_crd_c[1]) + "_" + str(tile_crd_d[0]) + "_" + str(tile_crd_d[1]) + ".mtx", gold_out) + elif os.path.exists(d_filename): + d_scipy = scipy.io.mmread(d_filename) + # print("\nd_scipy: ", d_scipy) + + itr = 0 + for i, j, v in zip(d_scipy.row, d_scipy.col, d_scipy.data): + d_scipy.data[itr] = d_scipy.data[itr] + + itr += 1 + d_scipy = d_scipy.tocsr() + # d_scipy = np.transpose(d_scipy) + + # gold_nd = (B_scipy @ C_scipy) + # gold_nd = B_scipy.dot(C_scipy) + + #constants + alpha = 2 + beta = 2 + + # print(d_scipy.todense()) + gold_nd = beta * d_scipy + # print(gold_nd) + if(np.count_nonzero(gold_nd.todense()) == 0): + print("output is all zero") + return + + gold_out = gold_nd.tocoo() + # assert tile_crd_b[1] == tile_crd_c[0] and tile_crd_b[3] == tile_crd_c[1] and tile_crd_b[0] == tile_crd_d[0] and tile_crd_b[2] == tile_crd_d[1] + # assert tile_crd_b[1] == tile_crd_c[0] and tile_crd_b[3] == tile_crd_c[2] + scipy.io.mmwrite( + dirname + "out_" + str(tile_crd_b[0]) + "_" + str(tile_crd_b[1]) + "_" + str(tile_crd_b[3]) + "_" + str(tile_crd_b[2]) + "_" + str( + tile_crd_c[0]) + "_" + str(tile_crd_c[1]) + "_" + str(tile_crd_d[0]) + "_" + str(tile_crd_d[1]) + ".mtx", gold_out) + + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Generate tiled output gold") + parser.add_argument("--yaml_name", type=str, default="memory_config_real.yaml") + args = parser.parse_args() + outdir = f"./tiles/mat_mattransmul/output/" + outpath = Path(outdir) + outpath.mkdir(parents=True) + + # generate_gold_matmul_tiled([0, 1, 2, 9], [1, 0, 9, 0], outdir) + + # generate_gold_matmul_tiled([0, 1, 0, 7], [1, 0, 7, 0], outdir) + # quit() with open("/nobackup/rsharma3/Sparsity/simulator/old_sam/sam/tiles/matmul_ikj/tensor_sizes", "rb") as ff: + + with open(f"./tiles/mat_mattransmul/tensor_sizes", "rb") as ff: + sizes_dict_level_full = pickle.load(ff) + + with open("./sam/sim/src/tiling/" + args.yaml_name, "r") as stream: + loop_config = yaml.safe_load(stream) + + print() + print("sizes_dict_level_full", sizes_dict_level_full) + print() + print("loop_config", loop_config) + + struct = { + "j00": 1 + int(sizes_dict_level_full["B"][0]) // (loop_config["Glb_tile_size"] * loop_config["Mem_tile_size"]), + "i00": 1 + int(sizes_dict_level_full["c"][0]) // (loop_config["Glb_tile_size"] * loop_config["Mem_tile_size"]), + "i0": loop_config["Glb_tile_size"], "j0": loop_config["Glb_tile_size"]} + + print() + print(struct) + + # print(struct) + # # quit() + for i00 in range(struct["i00"]): + for j00 in range(struct["j00"]): + for i0 in range(struct["i0"]): + for j0 in range(struct["j0"]): + generate_gold_mattransmul_tiled([j00, i00, j0, i0], [i00, i0], [j00, j0], outdir) diff --git a/scripts/generate_sparsity_sweep_mem_model.sh b/scripts/tiling/generate_sparsity_sweep_mem_model.sh similarity index 56% rename from scripts/generate_sparsity_sweep_mem_model.sh rename to scripts/tiling/generate_sparsity_sweep_mem_model.sh index 4ac85ed3..bdd7cdfc 100755 --- a/scripts/generate_sparsity_sweep_mem_model.sh +++ b/scripts/tiling/generate_sparsity_sweep_mem_model.sh @@ -1,8 +1,10 @@ +# ./scripts/tiling/generate_sparsity_sweep_mem_model.sh + SECONDS=0 mkdir extensor_mtx cd extensor_mtx -python ../sam/onyx/synthetic/generate_fixed_nnz_mats.py +python ../sam/onyx/synthetic/generate_fixed_nnz_mats.py --extensor cd .. 
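The core of generate_gold_mattransmul.py above is the gold computation gold = alpha * (B @ c) + beta * d with alpha = beta = 2 hard-coded. A tiny worked example, using dense column vectors as stand-ins for the c and d tile files and skipping the tile bookkeeping and transposes:

```python
import numpy as np
import scipy.sparse as sp

alpha, beta = 2, 2
B = sp.csr_matrix(np.array([[1, 0], [0, 3]]))
c = np.array([[2], [1]])
d = np.array([[1], [1]])

# B @ c = [[2], [3]]; scaled by alpha and shifted by beta * d:
gold = alpha * (B @ c) + beta * d
print(gold)  # [[6], [8]]
```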
ELAPSED="Elapsed: $(($SECONDS / 3600))hrs $((($SECONDS / 60) % 60))min $(($SECONDS % 60))sec" printf "$ELAPSED" diff --git a/scripts/tiling/prepare_files.sh b/scripts/tiling/prepare_files.sh new file mode 100755 index 00000000..cab4ec41 --- /dev/null +++ b/scripts/tiling/prepare_files.sh @@ -0,0 +1,18 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH --mem 120000 +#SBATCH -p lanka-v3 +#SBATCH --exclusive + +# ./scripts/tiling/prepare_files.sh extensor__.mtx + +appname=$3 +testname=$4 + +basedir=$(pwd) + +rm -rf $basedir/tiles/* + +./scripts/tiling/tile_ext.sh $1 memory_config_extensor_17M_llb.yaml $appname $testname + +# python3 scripts/tiling/generate_gold_matmul_tiled.py --yaml_name memory_config_extensor_17M_llb.yaml diff --git a/scripts/tiling/prepare_files_no_gold.sh b/scripts/tiling/prepare_files_no_gold.sh new file mode 100755 index 00000000..719d668c --- /dev/null +++ b/scripts/tiling/prepare_files_no_gold.sh @@ -0,0 +1,12 @@ +#!/bin/bash +#SBATCH -N 1 +#SBATCH --mem 120000 +#SBATCH -p lanka-v3 +#SBATCH --exclusive + +# ./scripts/tiling/prepare_files_no_gold.sh extensor__.mtx + +basedir=$(pwd) +rm -rf $basedir/tiles/* + +$basedir/scripts/tiling/tile_ext.sh $1 memory_config_extensor_17M_llb.yaml diff --git a/scripts/single_point_memory_model_runner.sh b/scripts/tiling/single_point_memory_model_runner.sh similarity index 62% rename from scripts/single_point_memory_model_runner.sh rename to scripts/tiling/single_point_memory_model_runner.sh index e666769d..30b5ee4c 100755 --- a/scripts/single_point_memory_model_runner.sh +++ b/scripts/tiling/single_point_memory_model_runner.sh @@ -4,6 +4,8 @@ #SBATCH -p lanka-v3 #SBATCH --exclusive +# ./scripts/tiling/single_point_memory_model_runner.sh extensor__.mtx + benchout=memory_model_out basedir=$(pwd) @@ -31,13 +33,13 @@ mkdir -p $path mkdir -p $basedir/tiles/ rm -rf $basedir/tiles/* -./scripts/prepare_files.sh $fname +./scripts/tiling/prepare_files.sh $fname cd $basedir/sam/sim -pytest test/advanced-simulator/test_$bench.py --ssname $line -s --check-gold --skip-empty --nbuffer --yaml_name=$yaml_fname --nnz-value=$nnz --benchmark-json=$path/${line}_${nnz}_${dim}.json +pytest test/advanced-simulator/test_$bench.py --ssname $line -s --check-gold --skip-empty --nbuffer --yaml_name=$yaml_fname --memory-model --nnz-value=$nnz --benchmark-json=$path/${line}_${nnz}_${dim}.json -python $basedir/scripts/converter.py --json_name $path/${line}_${nnz}_${dim}.json +python $basedir/scripts/util/converter.py --json_name $path/${line}_${nnz}_${dim}.json -python3 $basedir/scripts/bench_csv_aggregator.py $path $basedir/$benchout/$bench.csv +python3 $basedir/scripts/util/bench_csv_aggregator.py $path $basedir/$benchout/$bench.csv popd diff --git a/scripts/tile.sh b/scripts/tiling/tile.sh similarity index 69% rename from scripts/tile.sh rename to scripts/tiling/tile.sh index 8b735289..9f71c94a 100755 --- a/scripts/tile.sh +++ b/scripts/tiling/tile.sh @@ -1,5 +1,7 @@ #!/bin/bash +# ./scripts/tiling/tile.sh + BENCHMARKS=( matmul_ikj ) @@ -22,7 +24,7 @@ for b in ${!BENCHMARKS[@]}; do python $basedir/sam/sim/src/tiling/tile.py --input_tensor $line --cotile $bench --multilevel --hw_config $basedir/sam/sim/src/tiling/$2 echo "Generating input format files for $line..." 
- python $basedir/scripts/datastructure_suitesparse.py -n $line -hw -b $bench --input $basedir/tiles/$bench/mtx/ --output_dir_path $basedir/tiles/$bench/formatted --tiles + python $basedir/scripts/formatting/datastructure_suitesparse.py -n $line -hw -b $bench --input $basedir/tiles/$bench/mtx/ --output_dir_path $basedir/tiles/$bench/formatted --tiles done <$1 done diff --git a/scripts/tiling/tile_ext.sh b/scripts/tiling/tile_ext.sh new file mode 100755 index 00000000..9c727db8 --- /dev/null +++ b/scripts/tiling/tile_ext.sh @@ -0,0 +1,51 @@ +#!/bin/bash + +# THIS FILE MUST BE RUN FROM sam/ location +# ./scripts/tiling/tile_ext.sh + +BENCHMARKS=( +# matmul_ikj + # mat_mattransmul + # mat_sddmm + # mat_vecmul_ij + # mat_vecmul_ij + # mat_residual + # mat_elemadd3 + $4 + # mat_mask_tri + # mat_vecmul_iter + # mat_elemadd +) + +appname=$3 + +echo "APP NAME IS : $4" +# exit 0 + +sspath=$SUITESPARSE_PATH + +basedir=$(pwd) + +tiles_path=$basedir/extensor_mtx/$1 + +echo "$tiles_path" + +for b in ${!BENCHMARKS[@]}; do + bench=${BENCHMARKS[$b]} + path=$basedir/$benchout/$bench + mkdir -p $basedir/$benchout/$bench + echo "Testing $bench..." + + rm -rf $basedir/tiles/* + + echo "Tiling mtx file" + # python $basedir/sam/sim/src/tiling/tile.py --extensor --input_path $tiles_path --cotile $bench --multilevel --hw_config $basedir/sam/sim/src/tiling/$2 + python3 ./sam/sim/src/tiling/tile.py --tensor_type ss --input_tensor $appname --cotile $bench --multilevel --hw_config ./sam/sim/src/tiling/memory_config_onyx.yaml --higher_order + + echo "Generating input format files for $tiles_path..." + python3 $basedir/scripts/formatting/datastructure_suitesparse.py -n temp -hw -b $bench --input $basedir/tiles/$bench/mtx/ --output_dir_path $basedir/tiles/$bench/formatted --tiles + + # $basedir/compiler/taco/build/bin/taco-test sam.pack_ss01 + # python3 $basedir/scripts/formatting/datastructure_tns.py -n rel5 -f ss01 -b $bench -hw +done + diff --git a/scripts/util/README.md b/scripts/util/README.md new file mode 100644 index 00000000..21f3936f --- /dev/null +++ b/scripts/util/README.md @@ -0,0 +1,10 @@ +# Utilities + +The `scripts/util` folder contains util.py (shared utility functions) and +shared utility scripts used to aggregate and format csv data. + +1. `util.py` - List of python util code needed by the Python scripts under `sam/scripts` +2. `bench_csv_aggregator.py` - Script that aggregates all of the output CSVs. + This is useful since CPU tests are run using googlebench potentially one + tensor at a time (to run tests in parallel), which will produce one CSV per tensor. +3. `converter.py` - Converts JSON to CSV. diff --git a/scripts/generate_extensor_synthetic.py b/scripts/util/__init__.py similarity index 100% rename from scripts/generate_extensor_synthetic.py rename to scripts/util/__init__.py diff --git a/scripts/bench_csv_aggregator.py b/scripts/util/bench_csv_aggregator.py similarity index 98% rename from scripts/bench_csv_aggregator.py rename to scripts/util/bench_csv_aggregator.py index 58a6f1f1..c24b7789 100644 --- a/scripts/bench_csv_aggregator.py +++ b/scripts/util/bench_csv_aggregator.py @@ -24,7 +24,7 @@ def aggregateTacoBenches(folder, outfile, taco=False, labelSet=None): # Discard the first 9 lines. This corresponds to the # google-benchmark generated header. if taco: - for i in range(0, 10): + for i in range(0, 9): f.readline() # Open the rest of the file as a CSV. 
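# (The lines skipped above are the google-benchmark context header that precedes the result rows; only the rows that follow get aggregated into the combined CSV.)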
reader = csv.reader(f) diff --git a/scripts/converter.py b/scripts/util/converter.py similarity index 100% rename from scripts/converter.py rename to scripts/util/converter.py diff --git a/scripts/util.py b/scripts/util/util.py similarity index 99% rename from scripts/util.py rename to scripts/util/util.py index 44d7323c..8332ba1b 100644 --- a/scripts/util.py +++ b/scripts/util/util.py @@ -496,7 +496,7 @@ def writeout_separate_vec(self, vec, dir_path, tensorname, format_str="s0", hw=T else: filename = os.path.join(vec_dir, "tensor_" + tensorname + "_mode_0_seg") with open(filename, "w") as ofile: - ofile.write(array_newline_str([0, len(vec_sp) + 1])) + ofile.write(array_newline_str([0, len(vec_sp)])) if not hw: filename = os.path.join(vec_dir, tensorname + "0_crd.txt") @@ -535,7 +535,7 @@ def writeout_separate_vec(self, vec, dir_path, tensorname, format_str="s0", hw=T ofile.write(array_newline_str(vec_shape)) def writeout_separate_sparse_only(self, coo, dir_path, tensorname, format_str="ss01", hw=True): - + if format_str == "ss01": dcsr_dir = Path(dir_path) dcsr_dir.mkdir(parents=True, exist_ok=True, mode=0o777) diff --git a/setup_tiling_mat.py b/setup_tiling_mat.py new file mode 100644 index 00000000..da8b082f --- /dev/null +++ b/setup_tiling_mat.py @@ -0,0 +1,79 @@ +import subprocess +import glob +import shutil +import os +import re +import sys + +from sam.util import SUITESPARSE_PATH + +#Usage: python3 setup_tiling_mat.py + + +## PARAMS ###################################################################### +data = [sys.argv[2]] +tilesizes = [int(sys.argv[3])] +app_name = sys.argv[1] +docker_path = sys.argv[4] + +print("TILESIZES: ", tilesizes) +print("DATA: ", data) +############################################################################### + +def write_to_line(file_path, line_number, new_content): + with open(file_path, 'r') as file: + lines = file.readlines() + + if line_number > len(lines) or line_number < 1: + # Line number is out of range + return + + lines[line_number - 1] = new_content + '\n' + + with open(file_path, 'w') as file: + file.writelines(lines) + +def replace_ones_with_zeros(mtx_file): + with open(mtx_file, 'r') as file: + lines = file.readlines() + + new_lines = [] + for line in lines: + values = line.split() + if len(values) >= 3: + values[2] = '0' + new_lines.append(' '.join(values)) + + with open(mtx_file, 'w') as file: + file.writelines(new_lines) + + +i = 0 +for datum in data: + tilesize = tilesizes[i] + + yaml_file = "sam/sim/src/tiling/memory_config_onyx.yaml" + mem_tile_line = f"Mem_tile_size: {tilesize}" + print(mem_tile_line) + write_to_line(yaml_file, 19, mem_tile_line) + + rmdir = f"rm -rf tiles/{app_name}" + os.system(rmdir) + + print(f"{SUITESPARSE_PATH}/{datum}.mtx") + mtx_file = glob.glob(f"{SUITESPARSE_PATH}/{datum}.mtx")[0] + os.makedirs("extensor_mtx", exist_ok=True) + shutil.copy(mtx_file,f"extensor_mtx/{datum}.mtx") + + command = f"./scripts/suitesparse_memory_model_runner.sh {datum} {app_name}" + os.system(command) + + docker_clean = f"docker exec {docker_path} rm -r /aha/garnet/tiles_{app_name}_{datum}" + print(docker_clean) + os.system(docker_clean) + + docker_copy_command = f"docker cp tiles {docker_path}:/aha/garnet/tiles_{app_name}_{datum}" + print(docker_copy_command) + os.system(docker_copy_command) + + i = i+1 diff --git a/setup_tiling_tensors.py b/setup_tiling_tensors.py new file mode 100644 index 00000000..15d73c98 --- /dev/null +++ b/setup_tiling_tensors.py @@ -0,0 +1,23 @@ +import numpy as np +import os +import glob +import 
shutil +from scripts.util.util import FormatWriter, InputCacheSuiteSparse + +#### PARAMS #### +tile = True +app_name = "tensor3_ttv" +vector_names = ['c'] +############## + +tiled_tensors = glob.glob(f"tiles/{app_name}/mtx/*.tns") +formatwriter = FormatWriter() +inputCache = InputCacheSuiteSparse() + +for tensor in tiled_tensors: + if any(x in tensor for x in vector_names): + #vector + inputCache.load(tensor) + formatwriter.writeout_separate_sparse_only() + else: + print("regular 3d tensors can be packed and tiled") \ No newline at end of file diff --git a/spmv_iter_matrices.txt b/spmv_iter_matrices.txt new file mode 100644 index 00000000..f113329a --- /dev/null +++ b/spmv_iter_matrices.txt @@ -0,0 +1,9 @@ +bcsstm26 +tols2000 +west2021 +adder_dcop_30 +adder_trans_02 +watt_2 +rajat12 +G42 +G30 diff --git a/spmv_sparsity_sweep.py b/spmv_sparsity_sweep.py new file mode 100644 index 00000000..8847e00b --- /dev/null +++ b/spmv_sparsity_sweep.py @@ -0,0 +1,42 @@ +import numpy as np +import scipy.io as sio +import scipy.sparse as sp +import os +import random + +num_rows = 10 +num_cols = 10 +density = 0.1 + +seed_value = 100 +random.seed(seed_value) +np.random.seed(seed_value) + +if not os.path.exists('spmv_sparsity_sweep'): + os.makedirs('spmv_sparsity_sweep') +else: + os.system("rm -rf spmv_sparsity_sweep/*") + +if not os.path.exists('spmv_sparsity_sweep/MAT_FILES'): + os.makedirs('spmv_sparsity_sweep/MAT_FILES') +else: + os.system("rm -rf spmv_sparsity_sweep/MAT_FILES/*") + os.makedirs('spmv_sparsity_sweep/MAT_FILES') + +if not os.path.exists('spmv_sparsity_sweep/MTX_FILES'): + os.makedirs('spmv_sparsity_sweep/MTX_FILES') +else: + os.system("rm -rf spmv_sparsity_sweep/MTX_FILES/*") + os.makedirs('spmv_sparsity_sweep/MTX_FILES') + +matrix = sp.random(num_rows, num_cols, density, data_rvs=np.ones, random_state=seed_value) +print(matrix) + +probability = 0.7 # Adjust this value to control the ratio of 1s to 0s in vector +vector = np.random.choice([0, 1], size=num_cols, p=[1 - probability, probability]) +print(vector) + +sio.mmwrite('spmv_sparsity_sweep/MTX_FILES/matrix.mtx', matrix) + +sio.savemat('spmv_sparsity_sweep/MAT_FILES/matrix.mat', {'matrix': matrix}) +sio.savemat('spmv_sparsity_sweep/MAT_FILES/vector.mat', {'vector': vector}) diff --git a/tile_pairing.py b/tile_pairing.py new file mode 100644 index 00000000..8af7000b --- /dev/null +++ b/tile_pairing.py @@ -0,0 +1,501 @@ +import shutil +import glob +import subprocess +import os +import json + +# test = "bcsstm26" +# test = "rel5" +test = "qiulp" +# test = "adder_dcop_30" +# test = "n4c6-b1" +# app_name = "mat_residual" +# app_name = "matmul_ijk" +# app_name = "matmul_ijk" +# app_name = "mat_mattransmul" +app_name = "mat_elemmul" +const_val = 2 # only for mat_mattransmul + + +tiles_accumulation = {} + +b_tensors = glob.glob(f"/home/avb03/sam/tiles/{app_name}/formatted/tensor_B*") +c_tensors = glob.glob(f"/home/avb03/sam/tiles/{app_name}/formatted/tensor_C*") +d_tensors = glob.glob(f"/home/avb03/sam/tiles/{app_name}/formatted/tensor_D*") + +print("b_tensors: ", b_tensors) +print("c_tensors: ", c_tensors) +print("d_tensors: ", d_tensors) + +# b_tensors = glob.glob(f"/aha/garnet/tiles_{app_name}_{test}/formatted/tensor_B*") +# c_tensors = glob.glob(f"/aha/garnet/tiles_{app_name}_{test}/formatted/tensor_C*") + +b_vec_tensors = glob.glob(f"/aha/garnet/tiles_{app_name}_{test}/{app_name}/formatted/tensor_b*") +c_vec_tensors = glob.glob(f"/home/avb03/sam/tiles/{app_name}/formatted/tensor_c*") +print("c_vec_tensors: ", c_vec_tensors)
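tile_pairing.py, which continues below, matches B tiles to C tiles purely by the coordinate suffix of each formatted-tile directory name. A sketch of the matmul_ijk pairing predicate; note that the b[-7:] slice assumes single-digit tile coordinates:

```python
# The trailing seven characters of a formatted-tile directory name encode four
# tile coordinates, and a B tile pairs with a C tile when B's column-tile
# indices match C's row-tile indices at both tiling levels.
def parse_coords(dirname):
    return dirname[-7:].split("_")  # "tensor_B_tile_0_1_2_3" -> ["0", "1", "2", "3"]

def matmul_pair(b_dir, c_dir):
    b_loc, c_loc = parse_coords(b_dir), parse_coords(c_dir)
    return b_loc[1] == c_loc[0] and b_loc[3] == c_loc[2]

print(matmul_pair("tensor_B_tile_0_1_2_3", "tensor_C_tile_1_0_3_0"))  # True
print(matmul_pair("tensor_B_tile_0_1_2_3", "tensor_C_tile_0_1_3_0"))  # False
```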
+d_vec_tensors = glob.glob(f"/aha/garnet/tiles_{app_name}_{test}/{app_name}/formatted/tensor_d*") + +d_loc_paired = [] +b_loc_paired = [] + +if not os.path.exists("SPARSE_TESTS/MAT_TMP_DIR"): + os.makedirs("SPARSE_TESTS/MAT_TMP_DIR") + +os.system(f"rm -rf SPARSE_TESTS/{app_name}*") +os.system(f"rm -rf SPARSE_TESTS/MAT_TMP_DIR/tile*") + +tile = 0 + +os.chdir("SPARSE_TESTS") + +if app_name == "matmul_ijk": + for b in b_tensors: + for c in c_tensors: + tile_str = "tile" + str(tile) + b_loc = b[-7:] + c_loc = c[-7:] + b_loc = b_loc.split("_") + c_loc = c_loc.split("_") + if(b_loc[1] == c_loc[0] and b_loc[3] == c_loc[2]): + print(b, c) + + if b_loc[2] not in tiles_accumulation: + tiles_accumulation[b_loc[2]] = [] + + tiles_accumulation[b_loc[2]].append(tile_str) + + if not os.path.exists(f"./MAT_TMP_DIR/{tile_str}"): + os.mkdir(f"./MAT_TMP_DIR/{tile_str}") + + shutil.copy(f"{b}/B0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_0_crd") + shutil.copy(f"{b}/B0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_0_seg") + + shutil.copy(f"{b}/B1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_1_crd") + shutil.copy(f"{b}/B1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_1_seg") + + shutil.copy(f"{b}/B_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_vals") + + shutil.copy(f"{b}/B_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_shape") + + shutil.copy(f"{c}/C0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_crd") + shutil.copy(f"{c}/C0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_seg") + + shutil.copy(f"{c}/C1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_crd") + shutil.copy(f"{c}/C1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_seg") + + shutil.copy(f"{c}/C_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_vals") + + shutil.copy(f"{c}/C_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_shape") + + tile = tile + 1 +elif app_name == "mat_elemadd" or app_name == "mat_elemmul": + for b in b_tensors: + for c in c_tensors: + tile_str = "tile" + str(tile) + b_loc = b[-7:] + c_loc = c[-7:] + b_loc = b_loc.split("_") + c_loc = c_loc.split("_") + if(b_loc == c_loc): + print(b, c) + if not os.path.exists(f"./MAT_TMP_DIR/{tile_str}"): + os.mkdir(f"./MAT_TMP_DIR/{tile_str}") + shutil.copy(f"{b}/B0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_0_crd") + shutil.copy(f"{b}/B0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_0_seg") + + shutil.copy(f"{b}/B1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_1_crd") + shutil.copy(f"{b}/B1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_1_seg") + + shutil.copy(f"{b}/B_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_vals") + + shutil.copy(f"{b}/B_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_shape") + + shutil.copy(f"{c}/C0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_crd") + shutil.copy(f"{c}/C0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_seg") + + shutil.copy(f"{c}/C1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_crd") + shutil.copy(f"{c}/C1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_seg") + + shutil.copy(f"{c}/C_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_vals") + + shutil.copy(f"{c}/C_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_shape") + + # subprocess.call(["aha", + # "regress", + # "fast"], + # text=True) + + # shutil.copy("/aha/garnet/SPARSE_TESTS/GLB_DIR/matmul_ijk_combined_seed_tile1/output_gold.npy", "/aha/garnet/SPARSE_TESTS/GLB_DIR/matmul_ijk_combined_seed_tile1/bin") + # 
+elif app_name == "mat_elemadd" or app_name == "mat_elemmul":
+    for b in b_tensors:
+        for c in c_tensors:
+            tile_str = "tile" + str(tile)
+            b_loc = b[-7:]
+            c_loc = c[-7:]
+            b_loc = b_loc.split("_")
+            c_loc = c_loc.split("_")
+            # element-wise ops only combine co-located tiles
+            if(b_loc == c_loc):
+                print(b, c)
+                if not os.path.exists(f"./MAT_TMP_DIR/{tile_str}"):
+                    os.mkdir(f"./MAT_TMP_DIR/{tile_str}")
+                shutil.copy(f"{b}/B0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_0_crd")
+                shutil.copy(f"{b}/B0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_0_seg")
+
+                shutil.copy(f"{b}/B1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_1_crd")
+                shutil.copy(f"{b}/B1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_1_seg")
+
+                shutil.copy(f"{b}/B_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_vals")
+
+                shutil.copy(f"{b}/B_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_shape")
+
+                shutil.copy(f"{c}/C0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_crd")
+                shutil.copy(f"{c}/C0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_seg")
+
+                shutil.copy(f"{c}/C1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_crd")
+                shutil.copy(f"{c}/C1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_seg")
+
+                shutil.copy(f"{c}/C_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_vals")
+
+                shutil.copy(f"{c}/C_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_shape")
+
+                # subprocess.call(["aha",
+                #                  "regress",
+                #                  "fast"],
+                #                 text=True)
+
+                # shutil.copy("/aha/garnet/SPARSE_TESTS/GLB_DIR/matmul_ijk_combined_seed_tile1/output_gold.npy", "/aha/garnet/SPARSE_TESTS/GLB_DIR/matmul_ijk_combined_seed_tile1/bin")
+                # shutil.copytree("/aha/garnet/SPARSE_TESTS/GLB_DIR/matmul_ijk_combined_seed_tile1/bin", f"/aha/garnet/SPARSE_TESTS/{tile_str}")
+                tile = tile + 1
+                # print("we are on tile ", tile)
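+# For reference, each paired mat_mattransmul tile directory produced below
+# should end up holding (per the copies in this branch):
+#   MAT_TMP_DIR/tileN/tensor_C_mode_{0,1}_{crd,seg} + vals/shape  <- from the matrix B tile
+#   MAT_TMP_DIR/tileN/tensor_d_mode_{0,1}_{crd,seg} + vals/shape  <- from the vector c tile
+#   MAT_TMP_DIR/tileN/tensor_f_mode_{0,1}_{crd,seg} + vals/shape  <- from the vector d tile
+#   MAT_TMP_DIR/tileN/tensor_b_mode_vals, tensor_e_mode_vals      <- const_val scalars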
+elif app_name == "mat_mattransmul":
+    for b in b_tensors:
+        for c in c_vec_tensors:
+            for d in d_vec_tensors:
+                tile_str = "tile" + str(tile)
+                b_loc = b[-7:]
+                c_loc = c[-3:]
+                d_loc = d[-3:]
+
+                b_loc = b_loc.split("_")
+                c_loc = c_loc.split("_")
+                d_loc = d_loc.split("_")
+
+                if(b_loc[1] == c_loc[0] and b_loc[3] == c_loc[1] and b_loc[0] == d_loc[0] and b_loc[2] == d_loc[1]):
+                # if(b_loc[1] == d_loc[0] and b_loc[3] == d_loc[1] and b_loc[0] == c_loc[0] and b_loc[2] == c_loc[1]):
+                    d_loc_paired.append(d_loc)
+
+                    print(f"\n ----- TILE {tile} ----- \n")
+                    print("B is: ", b)  # in build_tb, B maps to C, c maps to d, and d maps to f (FIXME: change build_tb)
+                    print("C is: ", c)
+                    print("d is: ", d)
+                    print(f"\n ----- TILE {tile} ----- \n")
+                    if not os.path.exists(f"./MAT_TMP_DIR/{tile_str}"):
+                        os.mkdir(f"./MAT_TMP_DIR/{tile_str}")
+
+                    # matrix tile B is written out under the name C, with modes swapped
+                    shutil.copy(f"{b}/B0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_crd")
+                    shutil.copy(f"{b}/B0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_seg")
+
+                    shutil.copy(f"{b}/B1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_crd")
+                    shutil.copy(f"{b}/B1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_seg")
+
+                    shutil.copy(f"{b}/B_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_vals")
+
+                    shutil.copy(f"{b}/B_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_shape")
+
+                    # vector tile c is written out under the name d
+                    shutil.copy(f"{c}/c1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_1_crd")
+                    shutil.copy(f"{c}/c1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_1_seg")
+
+                    shutil.copy(f"{c}/c0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_0_crd")
+                    shutil.copy(f"{c}/c0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_0_seg")
+
+                    # vector tile d is written out under the name f
+                    shutil.copy(f"{d}/d1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_f_mode_1_crd")
+                    shutil.copy(f"{d}/d1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_f_mode_1_seg")
+
+                    shutil.copy(f"{d}/d0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_f_mode_0_crd")
+                    shutil.copy(f"{d}/d0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_f_mode_0_seg")
+
+                    shutil.copy(f"{c}/c_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_vals")
+                    shutil.copy(f"{c}/c_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_shape")
+
+                    shutil.copy(f"{d}/d_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_f_mode_vals")
+                    shutil.copy(f"{d}/d_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_f_mode_shape")
+
+                    # scalar constants b and e both hold const_val
+                    with open(f"./MAT_TMP_DIR/{tile_str}/tensor_b_mode_vals", 'w') as file:
+                        file.write(str(const_val))
+
+                    with open(f"./MAT_TMP_DIR/{tile_str}/tensor_e_mode_vals", 'w') as file:
+                        file.write(str(const_val))
+
+                    tile = tile + 1
+                elif d_loc not in d_loc_paired:
+                    # case: the B and c tiles are zero but d is nonzero. We have all d
+                    # tiles, so take an arbitrary B and c tile, copy them, and zero out
+                    # their values.
+                    d_loc_paired.append(d_loc)
+                    print(f"\n ----- TILE D-unpaired {tile} ----- \n")
+                    print("B (zero tile) is: ", b)  # in build_tb, B maps to C, c maps to d, and d maps to f (FIXME: change build_tb)
+                    print("C (zero tile) is: ", c)
+                    print("d is: ", d)
+                    print(f"\n ----- TILE D-unpaired {tile} ----- \n")
+
+                    if not os.path.exists(f"./MAT_TMP_DIR/{tile_str}"):
+                        os.mkdir(f"./MAT_TMP_DIR/{tile_str}")
+
+                    shutil.copy(f"{b}/B0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_crd")
+                    shutil.copy(f"{b}/B0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_seg")
+
+                    shutil.copy(f"{b}/B1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_crd")
+                    shutil.copy(f"{b}/B1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_seg")
+
+                    shutil.copy(f"{b}/B_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_vals")
+
+                    # clear out C vals: open in write mode so the file is truncated
+                    # (the previous 'r+' read/replace/seek approach left stale bytes
+                    # behind whenever the old contents were longer than "0")
+                    with open(f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_vals", 'w') as file:
+                        file.write("0")
+
+                    shutil.copy(f"{b}/B_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_shape")
+
+                    shutil.copy(f"{c}/c1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_1_crd")
+                    shutil.copy(f"{c}/c1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_1_seg")
+
+                    shutil.copy(f"{c}/c0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_0_crd")
+                    shutil.copy(f"{c}/c0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_0_seg")
+
+                    shutil.copy(f"{d}/d1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_f_mode_1_crd")
+                    shutil.copy(f"{d}/d1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_f_mode_1_seg")
+
+                    shutil.copy(f"{d}/d0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_f_mode_0_crd")
+                    shutil.copy(f"{d}/d0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_f_mode_0_seg")
+
+                    shutil.copy(f"{c}/c_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_vals")
+
+                    # clear out d vals (write mode truncates; see note above)
+                    with open(f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_vals", 'w') as file:
+                        file.write("0")
+
+                    shutil.copy(f"{c}/c_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_shape")
+
+                    shutil.copy(f"{d}/d_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_f_mode_vals")
+                    shutil.copy(f"{d}/d_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_f_mode_shape")
+
+                    with open(f"./MAT_TMP_DIR/{tile_str}/tensor_b_mode_vals", 'w') as file:
+                        file.write(str(const_val))
+
+                    with open(f"./MAT_TMP_DIR/{tile_str}/tensor_e_mode_vals", 'w') as file:
+                        file.write(str(const_val))
+
+                    tile = tile + 1
+    print("d_loc_paired: ", d_loc_paired)
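+# Note on the D-unpaired path above: the downstream testbench presumably needs
+# an operand file for every tile position, so when only d has a nonzero tile
+# the script fabricates an all-zero B/c pair by copying arbitrary tiles and
+# overwriting their values with "0". mat_residual below applies the same trick
+# to unmatched b tiles.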
f"./MAT_TMP_DIR/{tile_str}/tensor_c_mode_1_seg") + + shutil.copy(f"{c}/c0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_c_mode_0_crd") + shutil.copy(f"{c}/c0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_c_mode_0_seg") + + shutil.copy(f"{c}/c_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_c_mode_vals") + shutil.copy(f"{c}/c_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_c_mode_shape") + + tile = tile + 1 +elif app_name == "mat_residual": + for b in b_vec_tensors: + for c in c_tensors: + for d in d_vec_tensors: + tile_str = "tile" + str(tile) + b_loc = b[-3:] + c_loc = c[-7:] + d_loc = d[-3:] + + b_loc = b_loc.split("_") + c_loc = c_loc.split("_") + d_loc = d_loc.split("_") + + # if(b_loc[1] == c_loc[0] and b_loc[3] == c_loc[1] and b_loc[0] == d_loc[0] and b_loc[2] == d_loc[1]): + if(c_loc[0] == b_loc[0] and c_loc[2] == b_loc[1] and c_loc[1] == d_loc[0] and c_loc[3] == d_loc[1]): + print(b, c, d) + b_loc_paired.append(b_loc) + + if not os.path.exists(f"./MAT_TMP_DIR/{tile_str}"): + os.mkdir(f"./MAT_TMP_DIR/{tile_str}") + + shutil.copy(f"{c}/C0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_crd") + shutil.copy(f"{c}/C0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_seg") + + shutil.copy(f"{c}/C1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_crd") + shutil.copy(f"{c}/C1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_seg") + + shutil.copy(f"{c}/C_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_vals") + + shutil.copy(f"{c}/C_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_shape") + + shutil.copy(f"{b}/b1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_b_mode_1_crd") + shutil.copy(f"{b}/b1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_b_mode_1_seg") + + shutil.copy(f"{b}/b0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_b_mode_0_crd") + shutil.copy(f"{b}/b0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_b_mode_0_seg") + + shutil.copy(f"{d}/d1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_1_crd") + shutil.copy(f"{d}/d1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_1_seg") + + shutil.copy(f"{d}/d0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_0_crd") + shutil.copy(f"{d}/d0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_0_seg") + + shutil.copy(f"{b}/b_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_b_mode_vals") + shutil.copy(f"{b}/b_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_b_mode_shape") + + shutil.copy(f"{d}/d_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_vals") + shutil.copy(f"{d}/d_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_shape") + + tile = tile + 1 + elif b_loc not in b_loc_paired: + b_loc_paired.append(b_loc) + + if not os.path.exists(f"./MAT_TMP_DIR/{tile_str}"): + os.mkdir(f"./MAT_TMP_DIR/{tile_str}") + + shutil.copy(f"{c}/C0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_crd") + shutil.copy(f"{c}/C0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_seg") + + shutil.copy(f"{c}/C1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_crd") + shutil.copy(f"{c}/C1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_seg") + + shutil.copy(f"{c}/C_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_vals") + + # clear out C vals + with open(f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_vals", 'r+') as file: + contents = file.read() + contents = contents.replace(contents, str(0)) + file.seek(0) + file.write(contents) + + shutil.copy(f"{c}/C_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_shape") + + shutil.copy(f"{b}/b1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_b_mode_1_crd") + shutil.copy(f"{b}/b1_seg.txt", 
f"./MAT_TMP_DIR/{tile_str}/tensor_b_mode_1_seg") + + shutil.copy(f"{b}/b0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_b_mode_0_crd") + shutil.copy(f"{b}/b0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_b_mode_0_seg") + + shutil.copy(f"{d}/d1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_1_crd") + shutil.copy(f"{d}/d1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_1_seg") + + shutil.copy(f"{d}/d0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_0_crd") + shutil.copy(f"{d}/d0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_0_seg") + + shutil.copy(f"{b}/b_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_b_mode_vals") + shutil.copy(f"{b}/b_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_b_mode_shape") + + shutil.copy(f"{d}/d_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_vals") + shutil.copy(f"{d}/d_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_shape") + + # clear out d vals + with open(f"./MAT_TMP_DIR/{tile_str}/tensor_d_mode_vals", 'r+') as file: + contents = file.read() + contents = contents.replace(contents, str(0)) + file.seek(0) + file.write(contents) + + tile = tile + 1 + +elif app_name == "mat_sddmm": + for b in b_tensors: + for c in c_tensors: + for d in d_tensors: + tile_str = "tile" + str(tile) + + b_loc = b[-7:] + c_loc = c[-7:] + d_loc = d[-7:] + + b_loc = b_loc.split("_") + c_loc = c_loc.split("_") + d_loc = d_loc.split("_") + + # first j, then i (k is a free coordinate) + if(b_loc[0] == d_loc[1] and b_loc[2] == d_loc[3] and b_loc[1] == c_loc[0] and b_loc[3] == c_loc[2]): + print(b, c, d) + if not os.path.exists(f"./MAT_TMP_DIR/{tile_str}"): + os.mkdir(f"./MAT_TMP_DIR/{tile_str}") + + shutil.copy(f"{b}/B0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_0_crd") + shutil.copy(f"{b}/B0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_0_seg") + + shutil.copy(f"{b}/B1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_1_crd") + shutil.copy(f"{b}/B1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_1_seg") + + shutil.copy(f"{b}/B_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_vals") + + shutil.copy(f"{b}/B_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_shape") + + shutil.copy(f"{c}/C0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_crd") + shutil.copy(f"{c}/C0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_seg") + + shutil.copy(f"{c}/C1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_crd") + shutil.copy(f"{c}/C1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_seg") + + shutil.copy(f"{c}/C_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_vals") + + shutil.copy(f"{c}/C_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_shape") + + tile = tile + 1 + +elif app_name == "mat_elemadd3": + for b in b_tensors: + for c in c_tensors: + for d in d_tensors: + tile_str = "tile" + str(tile) + b_loc = b[-7:] + c_loc = c[-7:] + b_loc = b_loc.split("_") + c_loc = c_loc.split("_") + d_loc = d[-7:] + d_loc = d_loc.split("_") + + # if(b_loc == c_loc and b_loc != d_loc): + # b_equal_c_no_d += 1 + # if(c_loc == d_loc and b_loc != c_loc): + # c_equal_d_no_b += 1 + # if(b_loc == d_loc and b_loc != c_loc): + # b_equal_d_no_c += 1 + + if(b_loc == c_loc and b_loc == d_loc): + print(b, c, d) + if not os.path.exists(f"./MAT_TMP_DIR/{tile_str}"): + os.mkdir(f"./MAT_TMP_DIR/{tile_str}") + shutil.copy(f"{b}/B0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_0_crd") + shutil.copy(f"{b}/B0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_0_seg") + + shutil.copy(f"{b}/B1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_1_crd") + 
+elif app_name == "mat_elemadd3":
+    for b in b_tensors:
+        for c in c_tensors:
+            for d in d_tensors:
+                tile_str = "tile" + str(tile)
+                b_loc = b[-7:]
+                c_loc = c[-7:]
+                b_loc = b_loc.split("_")
+                c_loc = c_loc.split("_")
+                d_loc = d[-7:]
+                d_loc = d_loc.split("_")
+
+                # if(b_loc == c_loc and b_loc != d_loc):
+                #     b_equal_c_no_d += 1
+                # if(c_loc == d_loc and b_loc != c_loc):
+                #     c_equal_d_no_b += 1
+                # if(b_loc == d_loc and b_loc != c_loc):
+                #     b_equal_d_no_c += 1
+
+                # all three operands must be co-located for a three-way element-wise add
+                if(b_loc == c_loc and b_loc == d_loc):
+                    print(b, c, d)
+                    if not os.path.exists(f"./MAT_TMP_DIR/{tile_str}"):
+                        os.mkdir(f"./MAT_TMP_DIR/{tile_str}")
+                    shutil.copy(f"{b}/B0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_0_crd")
+                    shutil.copy(f"{b}/B0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_0_seg")
+
+                    shutil.copy(f"{b}/B1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_1_crd")
+                    shutil.copy(f"{b}/B1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_1_seg")
+
+                    shutil.copy(f"{b}/B_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_vals")
+
+                    shutil.copy(f"{b}/B_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_B_mode_shape")
+
+                    shutil.copy(f"{c}/C0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_crd")
+                    shutil.copy(f"{c}/C0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_0_seg")
+
+                    shutil.copy(f"{c}/C1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_crd")
+                    shutil.copy(f"{c}/C1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_1_seg")
+
+                    shutil.copy(f"{c}/C_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_vals")
+
+                    shutil.copy(f"{c}/C_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_C_mode_shape")
+
+                    shutil.copy(f"{d}/D0_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_D_mode_0_crd")
+                    shutil.copy(f"{d}/D0_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_D_mode_0_seg")
+
+                    shutil.copy(f"{d}/D1_crd.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_D_mode_1_crd")
+                    shutil.copy(f"{d}/D1_seg.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_D_mode_1_seg")
+
+                    shutil.copy(f"{d}/D_vals.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_D_mode_vals")
+
+                    shutil.copy(f"{d}/D_shape.txt", f"./MAT_TMP_DIR/{tile_str}/tensor_D_mode_shape")
+
+                    # subprocess.call(["aha",
+                    #                  "regress",
+                    #                  "fast"],
+                    #                 text=True)
+
+                    # shutil.copy("/aha/garnet/SPARSE_TESTS/GLB_DIR/matmul_ijk_combined_seed_tile1/output_gold.npy", "/aha/garnet/SPARSE_TESTS/GLB_DIR/matmul_ijk_combined_seed_tile1/bin")
+                    # shutil.copytree("/aha/garnet/SPARSE_TESTS/GLB_DIR/matmul_ijk_combined_seed_tile1/bin", f"/aha/garnet/SPARSE_TESTS/{tile_str}")
+                    tile = tile + 1
+                    # print("we are on tile ", tile)
+
+print("tiles_accumulation: ", tiles_accumulation)
+
+# written one directory up because the script chdir'd into SPARSE_TESTS earlier
+with open("../tiles_accumulation.json", "w") as file:
+    json.dump(tiles_accumulation, file)
+
+print("there are", tile, "tiles")
\ No newline at end of file
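For reference, a downstream script could consume the tiles_accumulation.json written above along these lines. This is only an illustrative sketch (the accumulation itself happens elsewhere), and the relative path assumes the script runs from the repository root:

    import json

    # Map each shared coordinate to the tile directories grouped under it by
    # tile_pairing.py (only populated for matmul_ijk).
    with open("tiles_accumulation.json") as f:
        tiles_accumulation = json.load(f)

    for coord, tile_names in tiles_accumulation.items():
        # partial results from these tiles would be summed together
        print(coord, "->", tile_names)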