From 0dafd00d3aaf179d45e0c8ccbda1c6527785e25a Mon Sep 17 00:00:00 2001
From: Bo Wun Cheng
Date: Wed, 8 Nov 2023 11:35:55 -0800
Subject: [PATCH 01/20] add support for floating point matrix generation and parsing of the fp_mul op for the PE

---
 sam/onyx/generate_matrices.py     | 9 +++++++--
 sam/onyx/hw_nodes/compute_node.py | 3 ++-
 sam/onyx/parse_dot.py             | 4 ++--
 3 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/sam/onyx/generate_matrices.py b/sam/onyx/generate_matrices.py
index dbba52a1..ff74cb50 100644
--- a/sam/onyx/generate_matrices.py
+++ b/sam/onyx/generate_matrices.py
@@ -15,7 +15,7 @@ class MatrixGenerator:

     def __init__(self, name='B', shape=None, sparsity=0.6, format='CSF', dump_dir=None,
-                 tensor=None, value_cap=None, clean=True) -> None:
+                 tensor=None, value_cap=None, clean=True, use_fp=False) -> None:

         # assert dimension is not None
         # self.dimension = dimension
@@ -24,6 +24,7 @@ def __init__(self, name='B', shape=None, sparsity=0.6, format='CSF', dump_dir=No
         self.sparsity = sparsity
         self.format = format
         self.name = name
+        self.use_fp = use_fp
         if value_cap is None:
             self.value_cap = int(math.pow(2, 8)) - 1
         else:
@@ -54,7 +55,11 @@ def _create_matrix(self, value_cap=int(math.pow(2, 8)) - 1):
         '''
         Routine to create the actual matrix from the dimension/shape
         '''
-        self.array = numpy.random.randint(low=-1 * value_cap / 2, high=value_cap / 2, size=self.shape)
+        self.array = numpy.random.uniform(low=-1 * value_cap / 2, high=value_cap / 2, size=self.shape)
+        if not self.use_fp:
+            self.array = self.array.astype(int)
+        print(self.array.dtype)
+        breakpoint()
         for idx, x in numpy.ndenumerate(self.array):
             if random.random() < self.sparsity:
                 self.array[idx] = 0
diff --git a/sam/onyx/hw_nodes/compute_node.py b/sam/onyx/hw_nodes/compute_node.py
index ab4e9b5a..6423ae8a 100644
--- a/sam/onyx/hw_nodes/compute_node.py
+++ b/sam/onyx/hw_nodes/compute_node.py
@@ -8,7 +8,6 @@ def __init__(self, name=None, op=None) -> None:
         self.num_outputs = 1
         self.num_inputs_connected = 0
         self.num_outputs_connected = 0
-
         self.op = op

     def connect(self, other, edge, kwargs=None):
@@ -161,6 +160,8 @@ def configure(self, attributes):
             op_code = 2
         elif c_op == 'max':
             op_code = 4
+        elif c_op == 'fp_mul':
+            op_code = 5
         cfg_kwargs = {
             'op': op_code
         }
diff --git a/sam/onyx/parse_dot.py b/sam/onyx/parse_dot.py
index 1475f6ec..a7e291b7 100644
--- a/sam/onyx/parse_dot.py
+++ b/sam/onyx/parse_dot.py
@@ -101,11 +101,11 @@ def map_nodes(self):
                 hw_nt = f"HWNodeType.Repeat"
             elif n_type == "mul" or n_type == "add" or n_type == "max":
                 hw_nt = f"HWNodeType.Compute"
+            elif n_type == "fp_mul":
+                hw_nt = f"HWNodeType.Compute"
             elif n_type == "reduce":
                 hw_nt = f"HWNodeType.Reduce"
             elif n_type == "intersect" or n_type == "union":
-                if n_type == "union":
-                    print("UNION BLOCK")
                 hw_nt = f"HWNodeType.Intersect"
             elif n_type == "crddrop":
                 hw_nt = f"HWNodeType.Merge"
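For orientation, a minimal usage sketch of the new flag (names are this repo's API as shown in the diff above; note the stray print/breakpoint debug lines are removed in the next patch):

    from sam.onyx.generate_matrices import MatrixGenerator

    # default: integer-valued matrix; use_fp=True keeps floating-point values
    mg_int = MatrixGenerator(name='B', shape=[4, 4], sparsity=0.6)
    mg_fp = MatrixGenerator(name='B', shape=[4, 4], sparsity=0.6, use_fp=True)

From 6a0b322b0fbdecca641977fd9d102acf643560c8 Mon Sep 17 00:00:00 2001
From: Bo Wun Cheng
Date: Wed, 8 Nov 2023 23:33:39 -0800
Subject: [PATCH 02/20] added support for generating and dumping fp16 matrices

---
 sam/onyx/fiber_tree.py        |  1 -
 sam/onyx/generate_matrices.py | 79 ++++++++++++++++++++++-----------
 2 files changed, 50 insertions(+), 30 deletions(-)

diff --git a/sam/onyx/fiber_tree.py b/sam/onyx/fiber_tree.py
index 9736f0d2..66525311 100644
--- a/sam/onyx/fiber_tree.py
+++ b/sam/onyx/fiber_tree.py
@@ -38,7 +38,6 @@ def get_root(self):
         return self.root_fiber

     def populate_fiber(self, fiber, sub_tensor):
-
         # Last level detection
         if len(sub_tensor.shape) == 1:
             # Finally have just a 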
row, this is the base case...(could be a scalar) diff --git a/sam/onyx/generate_matrices.py b/sam/onyx/generate_matrices.py index ff74cb50..6efdb9d8 100644 --- a/sam/onyx/generate_matrices.py +++ b/sam/onyx/generate_matrices.py @@ -11,6 +11,7 @@ import csv import os from sam.sim.test.test import * +from lassen.utils import bfbin2float, float2bfbin class MatrixGenerator: @@ -44,8 +45,14 @@ def __init__(self, name='B', shape=None, sparsity=0.6, format='CSF', dump_dir=No self.dump_dir = tempfile.gettempdir() if tensor is not None: - self.array = tensor - self.shape = self.array.shape + if not tensor.dtype == numpy.float32: + self.array = tensor + self.shape = self.array.shape + else: + self.array = tensor + for idx, x in numpy.ndenumerate(self.array): + self.array[idx] = bfbin2float(float2bfbin(x)) + self.shape = self.array.shape else: assert shape is not None self._create_matrix(value_cap=self.value_cap) @@ -56,10 +63,17 @@ def _create_matrix(self, value_cap=int(math.pow(2, 8)) - 1): Routine to create the actual matrix from the dimension/shape ''' self.array = numpy.random.uniform(low=-1 * value_cap / 2, high=value_cap / 2, size=self.shape) + # convert to float32 for ease of conversion to bfloat16 + self.array = self.array.astype(numpy.float32) if not self.use_fp: self.array = self.array.astype(int) - print(self.array.dtype) - breakpoint() + else: + # convert to bfloat16 by truncating the trailing fraction bits + # converting it to floating point number + for idx, x in numpy.ndenumerate(self.array): + bfval = bfbin2float(float2bfbin(x)) + self.array[idx] = bfval + assert self.array.dtype == numpy.float32 for idx, x in numpy.ndenumerate(self.array): if random.random() < self.sparsity: self.array[idx] = 0 @@ -107,19 +121,19 @@ def dump_outputs(self, format=None, tpose=False, dump_shape=True, if glb_override: lines = [len(fake_lines_seg), *fake_lines_seg, len(fake_lines_crd), *fake_lines_crd] self.write_array(lines, name=f"tensor_{self.name}_mode_{mode}{suffix}", dump_dir=use_dir, - hex=print_hex) + dump_hex=print_hex) else: self.write_array(fake_lines_seg, name=f"tensor_{self.name}_mode_{mode}_seg{suffix}", - dump_dir=use_dir, hex=print_hex) + dump_dir=use_dir, dump_hex=print_hex) self.write_array(fake_lines_crd, name=f"tensor_{self.name}_mode_{mode}_crd{suffix}", - dump_dir=use_dir, hex=print_hex) + dump_dir=use_dir, dump_hex=print_hex) if glb_override: lines = [len(fake_lines_val), *fake_lines_val] self.write_array(fake_lines_val, name=f"tensor_{self.name}_mode_vals{suffix}", dump_dir=use_dir, - hex=print_hex) + dump_hex=print_hex) else: self.write_array(fake_lines_val, name=f"tensor_{self.name}_mode_vals{suffix}", dump_dir=use_dir, - hex=print_hex) + dump_hex=print_hex) return @@ -129,12 +143,12 @@ def dump_outputs(self, format=None, tpose=False, dump_shape=True, seg_arr, coord_arr = self._dump_csf(tmp_lvl_list) if glb_override: lines = [len(seg_arr), *seg_arr, len(coord_arr), *coord_arr] - self.write_array(lines, name=f"tensor_{self.name}_mode_0{suffix}", dump_dir=use_dir, hex=print_hex) + self.write_array(lines, name=f"tensor_{self.name}_mode_0{suffix}", dump_dir=use_dir, dump_hex=print_hex) else: self.write_array(seg_arr, name=f"tensor_{self.name}_mode_0_seg{suffix}", dump_dir=use_dir, - hex=print_hex) + dump_hex=print_hex) self.write_array(coord_arr, name=f"tensor_{self.name}_mode_0_crd{suffix}", dump_dir=use_dir, - hex=print_hex) + dump_hex=print_hex) at_vals = False i = 1 @@ -157,21 +171,21 @@ def dump_outputs(self, format=None, tpose=False, dump_shape=True, lines = 
[len(tmp_lvl_list), *tmp_lvl_list]
                     # self.write_array(tmp_lvl_list, name=f"tensor_{self.name}_mode_vals" dump_dir=use_dir)
                     self.write_array(lines, name=f"tensor_{self.name}_mode_vals{suffix}", dump_dir=use_dir,
-                                     hex=print_hex)
+                                     dump_hex=print_hex, is_val=True)
                 else:
                     self.write_array(tmp_lvl_list, name=f"tensor_{self.name}_mode_vals{suffix}", dump_dir=use_dir,
-                                     hex=print_hex)
+                                     dump_hex=print_hex, is_val=True)
             else:
                 seg_arr, coord_arr = self._dump_csf(tmp_lvl_list)
                 if glb_override:
                     lines = [len(seg_arr), *seg_arr, len(coord_arr), *coord_arr]
                     self.write_array(lines, name=f"tensor_{self.name}_mode_{i}{suffix}", dump_dir=use_dir,
-                                     hex=print_hex)
+                                     dump_hex=print_hex)
                 else:
                     self.write_array(seg_arr, name=f"tensor_{self.name}_mode_{i}_seg{suffix}", dump_dir=use_dir,
-                                     hex=print_hex)
+                                     dump_hex=print_hex)
                     self.write_array(coord_arr, name=f"tensor_{self.name}_mode_{i}_crd{suffix}", dump_dir=use_dir,
-                                     hex=print_hex)
+                                     dump_hex=print_hex)
             i = i + 1
         elif self.format == "UNC":
             flat_array = []
@@ -179,10 +193,10 @@ def dump_outputs(self, format=None, tpose=False, dump_shape=True,
                 flat_array.append(val)
             if glb_override:
                 lines = [len(flat_array), *flat_array]
-                self.write_array(lines, name=f"tensor_{self.name}_mode_vals{suffix}", dump_dir=use_dir, hex=print_hex)
+                self.write_array(lines, name=f"tensor_{self.name}_mode_vals{suffix}", dump_dir=use_dir, dump_hex=print_hex)
             else:
                 self.write_array(flat_array, name=f"tensor_{self.name}_mode_vals{suffix}", dump_dir=use_dir,
-                                 hex=print_hex)
+                                 dump_hex=print_hex)
         elif self.format == "COO":
             crd_dict = dict()
             order = len(self.array.shape)
@@ -200,24 +214,24 @@ def dump_outputs(self, format=None, tpose=False, dump_shape=True,
                 if glb_override:
                     lines = [len(crd_dict[key]), *crd_dict[key]]
                     self.write_array(lines, name=f"tensor_{self.name}_mode_vals{suffix}", dump_dir=use_dir,
-                                     hex=print_hex)
+                                     dump_hex=print_hex)
                 else:
                     self.write_array(crd_dict[key], name=f"tensor_{self.name}_mode_vals{suffix}", dump_dir=use_dir,
-                                     hex=print_hex)
+                                     dump_hex=print_hex)
             else:
                 if glb_override:
                     lines = [len(crd_dict[key]), *crd_dict[key]]
                     self.write_array(lines, name=f"tensor_{self.name}_mode_{key}_crd{suffix}", dump_dir=use_dir,
-                                     hex=print_hex)
+                                     dump_hex=print_hex)
                 else:
                     self.write_array(crd_dict[key], name=f"tensor_{self.name}_mode_{key}_crd{suffix}",
                                      dump_dir=use_dir,
-                                     hex=print_hex)
+                                     dump_hex=print_hex)

         if dump_shape:
             self.write_array(self.array.shape, name=f"tensor_{self.name}_mode_shape{suffix}", dump_dir=use_dir,
-                             hex=print_hex)
+                             dump_hex=print_hex)

         # Transpose it back
         if tpose is True:
@@ -246,7 +260,7 @@ def _dump_csf(self, level_list):

         return seg_arr, coord_arr

-    def write_array(self, str_list, name, dump_dir=None, hex=False):
+    def write_array(self, str_list, name, dump_dir=None, dump_hex=False, is_val=False):
         """Write an array/list to a file

         Args:
@@ -259,11 +273,18 @@ def write_array(self, str_list, name, dump_dir=None, hex=False):
         full_path = dump_dir + "/" + name
         with open(full_path, "w+") as wr_file:
             for item in str_list:
-                item_int = int(item)
-                if hex:
-                    wr_file.write(f"{item_int:04X}\n")
+                data = item
+                if not is_val:
+                    data = int(item)
+                if dump_hex:
+                    if not type(data) == numpy.float32:
+                        wr_file.write(f"{data:04X}\n")
+                    else:
+                        # converting result to bf16 hexadecimal representation
+                        data = hex(int(float2bfbin(data), 2))[2:].zfill(4)
+                        wr_file.write(f"{data}\n")
                 else:
-                    wr_file.write(f"{item_int}\n")

     def get_shape(self):
         return self.shape
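The bfloat16 handling above leans on lassen's float2bfbin/bfbin2float helpers; a small sketch of the round-trip and of the 4-digit hex words write_array now emits (assuming float2bfbin returns a 16-character bit string, as its use above implies; printed values are illustrative):

    from lassen.utils import bfbin2float, float2bfbin

    x = 3.14159
    bits = float2bfbin(x)                  # 16-char bit string, e.g. '0100000001001001'
    print(bfbin2float(bits))               # nearest bfloat16 value, e.g. 3.140625
    print(hex(int(bits, 2))[2:].zfill(4))  # hex word as dumped to file, e.g. '4049'

From b99e4a2825516add1d5254be96184cb1a3cc62d2 Mon Sep 17 00:00:00 2001
From: Bo Wun Cheng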
Date: Wed, 22 Nov 2023 14:41:17 -0800
Subject: [PATCH 03/20] add 'exp' to the list of glb names to be annotated

---
 sam/onyx/parse_dot.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sam/onyx/parse_dot.py b/sam/onyx/parse_dot.py
index 8533c9a8..14adb80d 100644
--- a/sam/onyx/parse_dot.py
+++ b/sam/onyx/parse_dot.py
@@ -1016,7 +1016,7 @@ def duplicate_graph(self, tensor, factor, output='x'):
     def annotate_IO_nodes(self):
         original_nodes = self.graph.get_nodes()
         output_nodes = ['x', 'X']
-        input_nodes = ['c', 'C', 'b', 'B', 'd', 'D', 'e', 'E', 'f', 'F']
+        input_nodes = ['c', 'C', 'b', 'B', 'd', 'D', 'e', 'E', 'f', 'F', 'exp']
        exclude_nodes = ['b', 'B']
         for node in original_nodes:
             node_attrs = node.get_attributes()

From a74f4708710d46d6e69883d6cf9cab626d93e14b Mon Sep 17 00:00:00 2001
From: Bo Wun Cheng
Date: Mon, 27 Nov 2023 16:07:03 -0800
Subject: [PATCH 04/20] add parsing and configuration support for the ops required by exp

---
 sam/onyx/hw_nodes/compute_node.py | 10 ++++++++++
 sam/onyx/parse_dot.py             |  2 ++
 2 files changed, 12 insertions(+)

diff --git a/sam/onyx/hw_nodes/compute_node.py b/sam/onyx/hw_nodes/compute_node.py
index 96e492bb..d9823ea9 100644
--- a/sam/onyx/hw_nodes/compute_node.py
+++ b/sam/onyx/hw_nodes/compute_node.py
@@ -176,6 +176,16 @@ def configure(self, attributes):
             op_code = 2
         elif c_op == 'max':
             op_code = 4
+        elif c_op == 'and':
+            op_code = 5
+        elif c_op == 'fp_mul':
+            op_code = 6
+        elif c_op == 'fgetfint':
+            op_code = 7
+        elif c_op == 'fgetffrac':
+            op_code = 8
+        elif c_op == 'faddiexp':
+            op_code = 9
         cfg_kwargs = {
             'op': op_code,
             'use_dense': use_dense,
diff --git a/sam/onyx/parse_dot.py b/sam/onyx/parse_dot.py
index 0056e9a7..74dbe133 100644
--- a/sam/onyx/parse_dot.py
+++ b/sam/onyx/parse_dot.py
@@ -101,6 +101,8 @@ def map_nodes(self):
                 hw_nt = f"HWNodeType.Repeat"
             elif n_type == "mul" or n_type == "add" or n_type == "max":
                 hw_nt = f"HWNodeType.Compute"
+            elif n_type == "fgetfint" or n_type == "fgetffrac" or n_type == "faddiexp":
+                hw_nt = f"HWNodeType.Compute"
             elif n_type == "reduce":
                 hw_nt = f"HWNodeType.Reduce"
             elif n_type == "intersect" or n_type == "union":
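The five new ops line up with the standard exp(x) = 2^(x * log2(e)) decomposition that later patches wire up; a hedged Python reference of that decomposition (the op semantics here are inferred from their names and downstream use, not from an ISA spec):

    import math

    def exp_ref(x):
        y = x * 1.44269504089      # fp_mul by log2(e)
        y_int = math.floor(y)      # fgetfint (assumed): integer part of y
        y_frac = y - y_int         # fgetffrac (assumed): fractional part of y
        frac_pow = 2.0 ** y_frac   # in hardware: table lookup, addressed via the 'and' mask
        return math.ldexp(frac_pow, int(y_int))  # faddiexp (assumed): add y_int to the exponent field

    print(exp_ref(1.0))  # ~2.71828

From 30b1224e92c0b64d7e34667703ea31447668d02c Mon Sep 17 00:00:00 2001
From: Bo Wun Cheng
Date: Mon, 27 Nov 2023 17:57:24 -0800
Subject: [PATCH 05/20] added support to configure one of the operands of fp_mul and 'and' as a constant

---
 sam/onyx/hw_nodes/compute_node.py | 20 +++++++++++++++++---
 1 file changed, 17 insertions(+), 3 deletions(-)

diff --git a/sam/onyx/hw_nodes/compute_node.py b/sam/onyx/hw_nodes/compute_node.py
index b05a5741..cbaa0434 100644
--- a/sam/onyx/hw_nodes/compute_node.py
+++ b/sam/onyx/hw_nodes/compute_node.py
@@ -1,4 +1,5 @@
 from sam.onyx.hw_nodes.hw_node import *
+from lassen.utils import float2bfbin


 class ComputeNode(HWNode):
@@ -184,11 +185,24 @@ def configure(self, attributes):
         elif c_op == 'fgetffrac':
             op_code = 8
         elif c_op == 'faddiexp':
-            op_code = 9
+            op_code = 9 
+
+        rb_const = None
+        if "rb_const" in attributes:
+            # the b operand of the op is a constant
+            rb_const = attributes["rb_const"].strip('"')
+            if "." 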
in rb_const:
+                # constant is a floating point
+                rb_const = float2bfbin(float(rb_const))
+            else:
+                # it is an int
+                rb_const = int(rb_const)
+
         cfg_kwargs = {
             'op': op_code,
             'use_dense': use_dense,
             'pe_only': pe_only,
-            'pe_in_external': pe_in_external
+            'pe_in_external': pe_in_external,
+            'rb_const': rb_const
         }
-        return (op_code, use_dense, pe_only, pe_in_external), cfg_kwargs
+        return (op_code, use_dense, pe_only, pe_in_external, rb_const), cfg_kwargs
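For reference, the 16-bit pattern a fractional constant such as "0.2" lowers to (a sketch with lassen's helper; the exact low bit depends on its rounding, and the decoding itself is corrected in patch 07 below):

    from lassen.utils import float2bfbin

    bits = float2bfbin(0.2)        # 16-char bfloat16 bit string
    print(f"{int(bits, 2):04X}")   # e.g. 3E4D

From f013dd32195e9132094a6ace69eb003d51477cda Mon Sep 17 00:00:00 2001
From: Bo Wun Cheng
Date: Tue, 28 Nov 2023 00:16:44 -0800
Subject: [PATCH 06/20] update get_tensor_from_files function to enable bf16

---
 sam/onyx/generate_matrices.py | 18 ++++++++++++++----
 1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/sam/onyx/generate_matrices.py b/sam/onyx/generate_matrices.py
index 6efdb9d8..c65fa7e7 100644
--- a/sam/onyx/generate_matrices.py
+++ b/sam/onyx/generate_matrices.py
@@ -453,7 +453,7 @@ def run_statistics(name, seed, shape, dump_dir, sparsity):
     return (avg1, avg2)


-def create_matrix_from_point_list(name, pt_list, shape) -> MatrixGenerator:
+def create_matrix_from_point_list(name, pt_list, shape, use_fp=False) -> MatrixGenerator:
     mat_base = numpy.zeros(shape)
     dims = len(shape)
     for pt_idx in range(len(pt_list[0])):
@@ -462,6 +462,16 @@ def create_matrix_from_point_list(name, pt_list, shape) -> MatrixGenerator:
             pt_base.append(pt_list[i][pt_idx])
         mat_base[tuple(pt_base)] = pt_list[dims][pt_idx]

+    # Convert the input matrix to MatrixGenerator according to specified use_fp
+    if use_fp:
+        mat_base = mat_base.astype(numpy.float32)
+        for idx, x in numpy.ndenumerate(mat_base):
+            # Convert the input from int to bfloat16
+            tmp_x = bin(int(x))[2:].zfill(16)
+            mat_base[idx] = bfbin2float(tmp_x)
+    else:
+        mat_base = mat_base.astype(numpy.uint16, casting='unsafe')
+
     mg = MatrixGenerator(name=f"{name}", shape=shape, sparsity=0.7, format='CSF', dump_dir=None, tensor=mat_base)

     return mg
@@ -503,7 +513,7 @@ def convert_aha_glb_output_file(glbfile, output_dir):

 def get_tensor_from_files(name, files_dir, shape, base=10,
                           format='CSF', early_terminate=None, tensor_ordering=None,
-                          suffix="", positive_only=True) -> MatrixGenerator:
+                          suffix="", positive_only=True, use_fp=False) -> MatrixGenerator:
     all_files = os.listdir(files_dir)
     dims = len(shape)

@@ -545,7 +555,7 @@ def get_tensor_from_files(name, files_dir, shape, base=10,
             segs.append(seg_t_)
             # Empty matrix... 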
if len(seg_t_) == 2 and seg_t_[0] == 0 and seg_t_[1] == 0:
-                mg = MatrixGenerator(name=name, shape=shape, sparsity=1.0)
+                mg = MatrixGenerator(name=name, shape=shape, sparsity=1.0, use_fp=use_fp)
                 created_empty = True
                 break
             crd_t_ = read_inputs(f"{files_dir}/{crd_f}", intype=int, base=base, early_terminate=early_terminate,
@@ -553,7 +563,7 @@ def get_tensor_from_files(name, files_dir, shape, base=10,
             crds.append(crd_t_)
         if not created_empty:
             pt_list = get_point_list(crds, segs, val_arr=vals)
-            mg = create_matrix_from_point_list(name, pt_list, shape_reordered)
+            mg = create_matrix_from_point_list(name, pt_list, shape_reordered, use_fp=use_fp)
     elif format == 'COO':
         crds = []
         for mode in range(dims):

From 18e2b2d18051a55f8211d4d4fbbf32e220e93959 Mon Sep 17 00:00:00 2001
From: Bo Wun Cheng
Date: Tue, 28 Nov 2023 14:20:22 -0800
Subject: [PATCH 07/20] fixed bug in decoding the rb_const value for fp_mul and 'and'

---
 sam/onyx/hw_nodes/compute_node.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/sam/onyx/hw_nodes/compute_node.py b/sam/onyx/hw_nodes/compute_node.py
index cbaa0434..b3d375b1 100644
--- a/sam/onyx/hw_nodes/compute_node.py
+++ b/sam/onyx/hw_nodes/compute_node.py
@@ -193,7 +193,8 @@ def configure(self, attributes):
             rb_const = attributes["rb_const"].strip('"')
             if "." in rb_const:
                 # constant is a floating point
-                rb_const = float2bfbin(float(rb_const))
+                rb_const = float(rb_const)
+                rb_const = int(float2bfbin(rb_const), 2)
             else:
                 # it is an int
                 rb_const = int(rb_const)

From c82d99e1ae5003b3e8307d4856437d314eb19a65 Mon Sep 17 00:00:00 2001
From: Bo Wun Cheng
Date: Thu, 7 Dec 2023 15:22:57 -0800
Subject: [PATCH 08/20] added graph for spmv and spmv_relu

---
 compiler/sam-outputs/onyx-dot/spmv.gv      | 37 +++++++++++++++++
 compiler/sam-outputs/onyx-dot/spmv_relu.gv | 47 ++++++++++++++++++++++
 2 files changed, 84 insertions(+)
 create mode 100644 compiler/sam-outputs/onyx-dot/spmv.gv
 create mode 100644 compiler/sam-outputs/onyx-dot/spmv_relu.gv

diff --git a/compiler/sam-outputs/onyx-dot/spmv.gv b/compiler/sam-outputs/onyx-dot/spmv.gv
new file mode 100644
index 00000000..54311ebe
--- /dev/null
+++ b/compiler/sam-outputs/onyx-dot/spmv.gv
@@ -0,0 +1,37 @@
+digraph SAM {
+    comment="x=s0,B=ss01,c=d0"
+    14 [comment="type=fiberlookup,index=i,tensor=B,mode=0,format=compressed,src=true,root=true" label="FiberLookup i: B0\ncompressed" color=green4 shape=box style=filled type="fiberlookup" index="i" tensor="B" mode="0" format="compressed" src="true" root="true"]
+    13 [comment="type=broadcast" shape=point style=invis type="broadcast"]
+    7 [comment="type=crddrop,outer=i,inner=j" label="CrdDrop i,j" color=orange shape=box style=filled type="crddrop" outer="i" inner="j"]
+    1 [comment="type=fiberwrite,index=i,tensor=x,mode=0,format=compressed,segsize=2,crdsize=B0_dim,sink=true" label="FiberWrite i: x0\ncompressed" color=green3 shape=box style=filled type="fiberwrite" index="i" tensor="x" mode="0" format="compressed" segsize="2" crdsize="B0_dim" sink="true"]
+    12 [comment="type=repsiggen,index=i" label="RepeatSignalGenerator i" color=cyan3 shape=box style=filled type="repsiggen" index="i"]
+    11 [comment="type=repeat,index=i,tensor=c,root=true" label="Repeat i: c" color=cyan2 shape=box style=filled type="repeat" index="i" tensor="c" root="true"]
+    10 [comment="type=fiberlookup,index=j,tensor=c,mode=0,format=dense,src=true,root=false" label="FiberLookup j: c0\ncompressed" color=green4 shape=box style=filled type="fiberlookup" index="j" tensor="c" mode="0" format="dense" src="true" root="false"]
+    8 
[comment="type=intersect,index=j" label="intersect j" color=purple shape=box style=filled type="intersect" index="j"] + 5 [comment="type=arrayvals,tensor=B" label="Array Vals: B" color=green2 shape=box style=filled type="arrayvals" tensor="B"] + 4 [comment="type=mul" label="Mul" color=brown shape=box style=filled type="mul"] + 3 [comment="type=reduce" label="Reduce" color=brown shape=box style=filled type="reduce"] + 0 [comment="type=fiberwrite,mode=vals,tensor=x,size=1*B0_dim,sink=true" label="FiberWrite Vals: x" color=green3 shape=box style=filled type="fiberwrite" tensor="x" mode="vals" size="1*B0_dim" sink="true"] + 6 [comment="type=arrayvals,tensor=c" label="Array Vals: c" color=green2 shape=box style=filled type="arrayvals" tensor="c"] + 9 [comment="type=fiberlookup,index=j,tensor=B,mode=1,format=compressed,src=true,root=false" label="FiberLookup j: B1\ncompressed" color=green4 shape=box style=filled type="fiberlookup" index="j" tensor="B" mode="1" format="compressed" src="true" root="false"] + 14 -> 13 [label="crd" style=dashed type="crd" comment=""] + 13 -> 7 [label="crd_i" style=dashed type="crd" comment="i"] + 7 -> 1 [label="crd_outer-i" style=dashed type="crd" comment="outer-i"] + 13 -> 12 [label="crd" style=dashed type="crd" comment=""] + 12 -> 11 [label="repsig" style=dotted type="repsig"] + 11 -> 10 [label="ref" style=bold type="ref"] + 10 -> 8 [label="crd_in-c" style=dashed type="crd" comment="in-c"] + 8 -> 5 [label="ref_out-B" style=bold type="ref" comment="out-B"] + 5 -> 4 [label="val" type="val"] + 3 -> 0 [label="val" type="val"] + 8 -> 6 [label="ref_out-c" style=bold type="ref" comment="out-c"] + 6 -> 4 [label="val" type="val"] + 10 -> 8 [label="ref_in-c" style=bold type="ref" comment="in-c"] + 14 -> 9 [label="ref" style=bold type="ref" comment=""] + 9 -> 8 [label="crd_in-B" style=dashed type="crd" comment="in-B"] + 9 -> 8 [label="ref_in-B" style=bold type="ref" comment="in-B"] + + 8 -> 7 [label="crd_in-j" style=dashed type="crd" comment="in-j"] + 4 -> 3 [label="val" type="val"] + +} diff --git a/compiler/sam-outputs/onyx-dot/spmv_relu.gv b/compiler/sam-outputs/onyx-dot/spmv_relu.gv new file mode 100644 index 00000000..55d2bfe1 --- /dev/null +++ b/compiler/sam-outputs/onyx-dot/spmv_relu.gv @@ -0,0 +1,47 @@ +digraph SAM { + comment="x=s0,B=ss01,c=d0" + 14 [comment="type=fiberlookup,index=i,tensor=B,mode=0,format=compressed,src=true,root=true" label="FiberLookup i: B0\ncompressed" color=green4 shape=box style=filled type="fiberlookup" index="i" tensor="B" mode="0" format="compressed" src="true" root="true"] + 13 [comment="type=broadcast" shape=point style=invis type="broadcast"] + 12 [comment="type=repsiggen,index=i" label="RepeatSignalGenerator i" color=cyan3 shape=box style=filled type="repsiggen" index="i"] + 11 [comment="type=repeat,index=i,tensor=c,root=true" label="Repeat i: c" color=cyan2 shape=box style=filled type="repeat" index="i" tensor="c" root="true"] + 10 [comment="type=fiberlookup,index=j,tensor=c,mode=0,format=dense,src=true,root=false" label="FiberLookup j: c0\ncompressed" color=green4 shape=box style=filled type="fiberlookup" index="j" tensor="c" mode="0" format="dense" src="true" root="false"] + 8 [comment="type=intersect,index=j" label="intersect j" color=purple shape=box style=filled type="intersect" index="j"] + 5 [comment="type=arrayvals,tensor=B" label="Array Vals: B" color=green2 shape=box style=filled type="arrayvals" tensor="B"] + 4 [comment="type=mul" label="Mul" color=brown shape=box style=filled type="mul"] + 3 [comment="type=reduce" 
label="Reduce" color=brown shape=box style=filled type="reduce"]
+    6 [comment="type=arrayvals,tensor=c" label="Array Vals: c" color=green2 shape=box style=filled type="arrayvals" tensor="c"]
+    9 [comment="type=fiberlookup,index=j,tensor=B,mode=1,format=compressed,src=true,root=false" label="FiberLookup j: B1\ncompressed" color=green4 shape=box style=filled type="fiberlookup" index="j" tensor="B" mode="1" format="compressed" src="true" root="false"]
+
+
+    20 [comment="type=max" label="Max 0" color=brown shape=box style=filled type="max"]
+    0 [comment="type=fiberwrite,mode=vals,tensor=x,size=1*B0_dim,sink=true" label="FiberWrite Vals: x" color=green3 shape=box style=filled type="fiberwrite" tensor="x" mode="vals" size="1*B0_dim" sink="true"]
+    21 [comment="type=crddrop,outer=i,inner=val,mode=0" label="CrdDrop Compression i, val" color=orange style=filled type="crddrop" outer="i" inner="val" mode="0"]
+    2 [comment="type=fiberwrite,index=i,tensor=x,mode=0,format=compressed,segsize=2,crdsize=B0_dim,sink=true" label="FiberWrite i: x0\ncompressed" color=green3 shape=box style=filled type="fiberwrite" index="i" tensor="x" mode="0" format="compressed" segsize="2" crdsize="B0_dim" sink="true"]
+
+
+
+    14 -> 13 [label="crd" style=dashed type="crd" comment=""]
+    13 -> 12 [label="crd" style=dashed type="crd" comment=""]
+    12 -> 11 [label="repsig" style=dotted type="repsig"]
+    11 -> 10 [label="ref" style=bold type="ref"]
+    10 -> 8 [label="crd_in-c" style=dashed type="crd" comment="in-c"]
+    8 -> 5 [label="ref_out-B" style=bold type="ref" comment="out-B"]
+    5 -> 4 [label="val" type="val"]
+    8 -> 6 [label="ref_out-c" style=bold type="ref" comment="out-c"]
+    6 -> 4 [label="val" type="val"]
+    10 -> 8 [label="ref_in-c" style=bold type="ref" comment="in-c"]
+    14 -> 9 [label="ref" style=bold type="ref" comment=""]
+    9 -> 8 [label="crd_in-B" style=dashed type="crd" comment="in-B"]
+    9 -> 8 [label="ref_in-B" style=bold type="ref" comment="in-B"]
+
+
+    4 -> 3 [label="val" type="val"]
+
+
+    3 -> 20 [label="val" type="val" comment="val"]
+    20 -> 21 [label="val" type="val" comment="inner-val"]
+    13 -> 21 [label="crd_i" style=dashed type="crd" comment="i"]
+    21 -> 0 [label="val" type="val", comment="val"]
+    21 -> 2 [label="crd_outer-i" style=dashed type="crd" comment="outer-i"]
+
+}
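Reference semantics for the two graphs, as a dense numpy sketch (assumed from the dataflow above: intersect j feeds Mul, Reduce sums over j, and spmv_relu's "Max 0" applies the ReLU):

    import numpy as np

    B = np.array([[1.0, 0.0],
                  [2.0, 3.0]])
    c = np.array([4.0, 5.0])

    x_spmv = B @ c                    # spmv.gv: x(i) = sum_j B(i, j) * c(j)
    x_relu = np.maximum(x_spmv, 0.0)  # spmv_relu.gv: Max 0 after the Reduce
    print(x_spmv, x_relu)

From d132d85b0d6d0e5820978461ea3dfdf4f8c275cc Mon Sep 17 00:00:00 2001
From: Bo Wun Cheng
Date: Wed, 13 Dec 2023 19:26:14 -0800
Subject: [PATCH 09/20] added mapping and routing support for fp_max, fp_add and faddiexp instructions for the ALU

---
 sam/onyx/hw_nodes/compute_node.py      | 16 +++++++++++++---
 sam/onyx/hw_nodes/read_scanner_node.py | 10 +++++++++-
 sam/onyx/hw_nodes/reduce_node.py       |  2 +-
 sam/onyx/parse_dot.py                  |  4 ++--
 4 files changed, 25 insertions(+), 7 deletions(-)

diff --git a/sam/onyx/hw_nodes/compute_node.py b/sam/onyx/hw_nodes/compute_node.py
index b3d375b1..eac07348 100644
--- a/sam/onyx/hw_nodes/compute_node.py
+++ b/sam/onyx/hw_nodes/compute_node.py
@@ -119,10 +119,16 @@ def connect(self, other, edge, kwargs=None):
             other_conn = other.get_num_inputs()
             pe = self.get_name()
             # TODO: remove hack eventually
-            if 'Max' in other.op:
+            if 'Max 0' in other.op:
                 other_conn = 1
-            else:
-                other_conn = other.get_num_inputs()
+            elif 'Faddiexp' in other.op:
+                comment = edge.get_attributes()["comment"].strip('"')
+                if 'fp' in comment:
+                    other_conn = 0
+                elif 'exp' in comment:
+                    other_conn = 1
+                else: 
+                    assert 0, "edge connected to faddiexp has to have comment specified to either 'exp' or 'fp'"
             new_conns = {
                 f'pe_to_pe_{other_conn}': [
                    ([(pe, 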
"res"), (other_pe, f"data{other_conn}")], 17), @@ -186,6 +192,10 @@ def configure(self, attributes): op_code = 8 elif c_op == 'faddiexp': op_code = 9 + elif c_op == 'fp_max': + op_code = 10 + elif c_op == 'fp_add': + op_code = 11 rb_const = None if "rb_const" in attributes: diff --git a/sam/onyx/hw_nodes/read_scanner_node.py b/sam/onyx/hw_nodes/read_scanner_node.py index 14c2bd18..72afd45e 100644 --- a/sam/onyx/hw_nodes/read_scanner_node.py +++ b/sam/onyx/hw_nodes/read_scanner_node.py @@ -204,7 +204,15 @@ def connect(self, other, edge, kwargs=None): # Can use dynamic information to assign inputs to compute nodes # since add/mul are commutative compute_conn = other.get_num_inputs() - + # TODO: get rid of this hack + if 'Faddiexp' in other.op: + comment = edge.get_attributes()["comment"].strip('"') + if 'fp' in comment: + compute_conn = 0 + elif 'exp' in comment: + compute_conn = 1 + else: + assert 0 & "edge connected to faddiexp has to have comment specified to either 'exp' or 'fp'" new_conns = { f'rd_scan_to_compute_{compute_conn}': [ ([(rd_scan, "coord_out"), (compute, f"data{compute_conn}")], 17), diff --git a/sam/onyx/hw_nodes/reduce_node.py b/sam/onyx/hw_nodes/reduce_node.py index 2c904309..b19cf5e2 100644 --- a/sam/onyx/hw_nodes/reduce_node.py +++ b/sam/onyx/hw_nodes/reduce_node.py @@ -68,7 +68,7 @@ def connect(self, other, edge, kwargs=None): raise NotImplementedError(f'Cannot connect ReduceNode to {other_type}') elif other_type == ComputeNode: pe = other.get_name() - if 'Max' in other.op: + if 'Max 0' in other.op: other_conn = 1 else: other_conn = other.get_num_inputs() diff --git a/sam/onyx/parse_dot.py b/sam/onyx/parse_dot.py index 0da63382..fa18a557 100644 --- a/sam/onyx/parse_dot.py +++ b/sam/onyx/parse_dot.py @@ -99,11 +99,11 @@ def map_nodes(self): hw_nt = f"HWNodeType.RepSigGen" elif n_type == "repeat": hw_nt = f"HWNodeType.Repeat" - elif n_type == "mul" or n_type == "add" or n_type == "max": + elif n_type == "mul" or n_type == "add" or n_type == "max" or n_type == "and": hw_nt = f"HWNodeType.Compute" elif n_type == "fgetfint" or n_type == "fgetffrac" or n_type == "faddiexp": hw_nt = f"HWNodeType.Compute" - elif n_type == "fp_mul": + elif n_type == "fp_mul" or n_type == "fp_max" or n_type == "fp_add": hw_nt = f"HWNodeType.Compute" elif n_type == "reduce": hw_nt = f"HWNodeType.Reduce" From ff8f544193e46af4fcdc837ead9ad6089f6a0e26 Mon Sep 17 00:00:00 2001 From: Bo Wun Cheng Date: Wed, 13 Dec 2023 19:27:07 -0800 Subject: [PATCH 10/20] update matrix generation code to avoid turning 0 into a very small value --- sam/onyx/generate_matrices.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sam/onyx/generate_matrices.py b/sam/onyx/generate_matrices.py index c65fa7e7..34f4da93 100644 --- a/sam/onyx/generate_matrices.py +++ b/sam/onyx/generate_matrices.py @@ -51,6 +51,8 @@ def __init__(self, name='B', shape=None, sparsity=0.6, format='CSF', dump_dir=No else: self.array = tensor for idx, x in numpy.ndenumerate(self.array): + if x == 0.0: + continue self.array[idx] = bfbin2float(float2bfbin(x)) self.shape = self.array.shape else: @@ -466,6 +468,9 @@ def create_matrix_from_point_list(name, pt_list, shape, use_fp=False) -> MatrixG if use_fp: mat_base = mat_base.astype(numpy.float32) for idx, x in numpy.ndenumerate(mat_base): + if x == 0.0: + # don't need to truncate if it is already a zero + continue # Convert the input from int to bfloat16 tmp_x = bin(int(x))[2:].zfill(16) mat_base[idx] = bfbin2float(tmp_x) From 3992942695835103974e8d518d6042cc0f10e7b2 Mon Sep 17 00:00:00 2001 
From: Bo Wun Cheng
Date: Wed, 13 Dec 2023 19:37:15 -0800
Subject: [PATCH 11/20] add graph of mat_elemadd_leakyrelu_exp.gv

---
 .../onyx-dot/mat_elemadd_leakyrelu_exp.gv     | 54 +++++++++++++++++++
 1 file changed, 54 insertions(+)
 create mode 100644 compiler/sam-outputs/onyx-dot/mat_elemadd_leakyrelu_exp.gv

diff --git a/compiler/sam-outputs/onyx-dot/mat_elemadd_leakyrelu_exp.gv b/compiler/sam-outputs/onyx-dot/mat_elemadd_leakyrelu_exp.gv
new file mode 100644
index 00000000..aaad0587
--- /dev/null
+++ b/compiler/sam-outputs/onyx-dot/mat_elemadd_leakyrelu_exp.gv
@@ -0,0 +1,54 @@
+digraph SAM {
+    comment="X=ss01,B=ss01,C=ss01"
+    10 [comment="type=fiberlookup,index=i,tensor=B,mode=0,format=compressed,src=true,root=true" label="FiberLookup i: B0\ncompressed" color=green4 shape=box style=filled type="fiberlookup" index="i" tensor="B" mode="0" format="compressed" src="true" root="true"]
+    9 [comment="type=union,index=i" label="union i" color=purple shape=box style=filled type="union" index="i"]
+    2 [comment="type=fiberwrite,index=i,tensor=X,mode=0,format=compressed,segsize=2,crdsize=B0_dim,sink=true" label="FiberWrite i: X0\ncompressed" color=green3 shape=box style=filled type="fiberwrite" index="i" tensor="X" mode="0" format="compressed" segsize="2" crdsize="B0_dim" sink="true"]
+    7 [comment="type=fiberlookup,index=j,tensor=B,mode=1,format=compressed,src=true,root=false" label="FiberLookup j: B1\ncompressed" color=green4 shape=box style=filled type="fiberlookup" index="j" tensor="B" mode="1" format="compressed" src="true" root="false"]
+    6 [comment="type=union,index=j" label="union j" color=purple shape=box style=filled type="union" index="j"]
+    1 [comment="type=fiberwrite,index=j,tensor=X,mode=1,format=compressed,segsize=B0_dim+1,crdsize=B0_dim*B1_dim,sink=true" label="FiberWrite j: X1\ncompressed" color=green3 shape=box style=filled type="fiberwrite" index="j" tensor="X" mode="1" format="compressed" segsize="B0_dim+1" crdsize="B0_dim*B1_dim" sink="true"]
+    4 [comment="type=arrayvals,tensor=B" label="Array Vals: B" color=green2 shape=box style=filled type="arrayvals" tensor="B"]
+    3 [comment="type=fp_add" label="FP_Add" color=brown shape=box style=filled type="fp_add"]
+    12 [comment="broadcast" shape=point style=invis type="broadcast"]
+    13 [comment="type=fp_mul,rb_const=0.2" label="FP_Mul * 0.2" color=brown shape=box style=filled type="fp_mul" rb_const="0.2"]
+    14 [comment="type=fp_max" label="FP_Max" color=brown shape=box style=filled type="fp_max"]
+    15 [comment="type=fp_mul,rb_const=1.44269504089" label="FP_Mul * 1.44269504089" color=brown shape=box style=filled type="fp_mul" rb_const="1.44269504089"]
+    16 [comment="type=broadcast" shape=point style=invis type="broadcast"]
+    17 [comment="type=fgetfint" label="Fgetfint" color=brown shape=box style=filled type="fgetfint"]
+    18 [comment="type=fgetffrac" label="Fgetffrac" color=brown shape=box style=filled type="fgetffrac"]
+    19 [comment="type=and,rb_const=255" label="And 0x00FF" color=brown shape=box style=filled type="and" rb_const="255"]
+    20 [comment="type=faddiexp" label="Faddiexp" color=brown shape=box style=filled type="faddiexp"]
+    21 [comment="type=arrayvals,tensor=exp" label="Array Vals: exp" color=green2 shape=box style=filled type="arrayvals" tensor="exp"]
+    0 [comment="type=fiberwrite,mode=vals,tensor=X,size=1*B0_dim*B1_dim,sink=true" label="FiberWrite Vals: X" color=green3 shape=box style=filled type="fiberwrite" tensor="X" mode="vals" size="1*B0_dim*B1_dim" sink="true"]
+    5 [comment="type=arrayvals,tensor=C" label="Array Vals: C" 
color=green2 shape=box style=filled type="arrayvals" tensor="C"] + 8 [comment="type=fiberlookup,index=j,tensor=C,mode=1,format=compressed,src=true,root=false" label="FiberLookup j: C1\ncompressed" color=green4 shape=box style=filled type="fiberlookup" index="j" tensor="C" mode="1" format="compressed" src="true" root="false"] + 11 [comment="type=fiberlookup,index=i,tensor=C,mode=0,format=compressed,src=true,root=true" label="FiberLookup i: C0\ncompressed" color=green4 shape=box style=filled type="fiberlookup" index="i" tensor="C" mode="0" format="compressed" src="true" root="true"] + 10 -> 9 [label="crd_in-B" style=dashed type="crd" comment="in-B"] + 9 -> 2 [label="crd" style=dashed type="crd"] + 9 -> 7 [label="ref_out-B" style=bold type="ref" comment="out-B"] + 7 -> 6 [label="crd_in-B" style=dashed type="crd" comment="in-B"] + 6 -> 1 [label="crd" style=dashed type="crd"] + 6 -> 4 [label="ref_out-B" style=bold type="ref" comment="out-B"] + 4 -> 3 [label="val" type="val"] + 3 -> 12 [label="val" type="val"] + 12 -> 13 [label="val" type="val"] + 12 -> 14 [label="val" type="val"] + 13 -> 14 [label="val" type="val"] + 14 -> 15 [label="val" type="val"] + 15 -> 16 [label="val" type="val"] + 16 -> 17 [label="val" type="val"] + 16 -> 18 [label="val" type="val"] + 18 -> 19 [label="val" type="val"] + 19 -> 21 [label="ref" style=bold type="ref"] + 21 -> 20 [label="val" type="val" comment="fp"] + 17 -> 20 [label="val" type="val" comment="exp"] + 20 -> 0 [label="val" type="val"] + 6 -> 5 [label="ref_out-C" style=bold type="ref" comment="out-C"] + 5 -> 3 [label="val" type="val"] + 7 -> 6 [label="ref_in-B" style=bold type="ref" comment="in-B"] + 9 -> 8 [label="ref_out-C" style=bold type="ref" comment="out-C"] + 8 -> 6 [label="crd_in-C" style=dashed type="crd" comment="in-C"] + 8 -> 6 [label="ref_in-C" style=bold type="ref" comment="in-C"] + 10 -> 9 [label="ref_in-B" style=bold type="ref" comment="in-B"] + 11 -> 9 [label="crd_in-C" style=dashed type="crd" comment="in-C"] + 11 -> 9 [label="ref_in-C" style=bold type="ref" comment="in-C"] +} From ee9ef68ffd12a5dcb112f3440c4e3a2dcacba301 Mon Sep 17 00:00:00 2001 From: Bo Wun Cheng Date: Wed, 13 Dec 2023 20:11:04 -0800 Subject: [PATCH 12/20] added lassen to requirements.txt --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index 8671a953..d22c0b3f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -28,3 +28,4 @@ six==1.16.0 sparse==0.13.0 tomli==2.0.1 tqdm==4.64.1 +lassen @ git+https://github.com/StanfordAHA/lassen.git@master From 88699fa9a62c8e9f6c6ce2521d14770d76269ef7 Mon Sep 17 00:00:00 2001 From: Bo Wun Cheng Date: Wed, 13 Dec 2023 20:13:23 -0800 Subject: [PATCH 13/20] added peak to requirements.txt --- requirements.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements.txt b/requirements.txt index d22c0b3f..36b24b3a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -28,4 +28,5 @@ six==1.16.0 sparse==0.13.0 tomli==2.0.1 tqdm==4.64.1 +peak @ git+https://github.com/cdonovick/peak.git@master lassen @ git+https://github.com/StanfordAHA/lassen.git@master From e7dabb516ee3fe17801632b73e6dbbc805b8253c Mon Sep 17 00:00:00 2001 From: Bo Wun Cheng Date: Wed, 13 Dec 2023 21:08:30 -0800 Subject: [PATCH 14/20] remove lassen and peak dependencies from requirements.txt and move them to python-package-conda.yml --- .github/workflows/python-package-conda.yml | 4 ++++ requirements.txt | 4 +--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/python-package-conda.yml 
b/.github/workflows/python-package-conda.yml index 055a945b..dda38a79 100644 --- a/.github/workflows/python-package-conda.yml +++ b/.github/workflows/python-package-conda.yml @@ -25,6 +25,10 @@ jobs: python -m virtualenv venv source venv/bin/activate pip install -r requirements.txt + git clone https://github.com/cdonovick/peak.git + python peak/setup.py install + git clone https://github.com/StanfordAHA/lassen.git + python lassen/setup.py install pip install -e . echo $VIRTUAL_ENV/bin >> $GITHUB_PATH - name: Test all (non SuiteSparse and Frostt) tests with pytest diff --git a/requirements.txt b/requirements.txt index 36b24b3a..5a64e46f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,6 +27,4 @@ scipy==1.10.0 six==1.16.0 sparse==0.13.0 tomli==2.0.1 -tqdm==4.64.1 -peak @ git+https://github.com/cdonovick/peak.git@master -lassen @ git+https://github.com/StanfordAHA/lassen.git@master +tqdm==4.64.1 \ No newline at end of file From 9cb68b7e8b6ee28640fc2c199976fb5168a9b9ce Mon Sep 17 00:00:00 2001 From: Bo Wun Cheng Date: Wed, 13 Dec 2023 23:08:43 -0800 Subject: [PATCH 15/20] update peak and lassen installation script --- .github/workflows/python-package-conda.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml index dda38a79..f218ccdb 100644 --- a/.github/workflows/python-package-conda.yml +++ b/.github/workflows/python-package-conda.yml @@ -26,9 +26,9 @@ jobs: source venv/bin/activate pip install -r requirements.txt git clone https://github.com/cdonovick/peak.git - python peak/setup.py install + pip install -e peak git clone https://github.com/StanfordAHA/lassen.git - python lassen/setup.py install + pip install -e lassen pip install -e . echo $VIRTUAL_ENV/bin >> $GITHUB_PATH - name: Test all (non SuiteSparse and Frostt) tests with pytest From 973b47d8506783d9d84af9ae8051de7dcc389f13 Mon Sep 17 00:00:00 2001 From: Bo Wun Cheng Date: Wed, 13 Dec 2023 23:12:36 -0800 Subject: [PATCH 16/20] remove peak and lassen directory after installation so the linter doesn't lint them --- .github/workflows/python-package-conda.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml index f218ccdb..0085bde0 100644 --- a/.github/workflows/python-package-conda.yml +++ b/.github/workflows/python-package-conda.yml @@ -27,8 +27,10 @@ jobs: pip install -r requirements.txt git clone https://github.com/cdonovick/peak.git pip install -e peak + rm -rf peak git clone https://github.com/StanfordAHA/lassen.git pip install -e lassen + rm -rf lassen pip install -e . 
echo $VIRTUAL_ENV/bin >> $GITHUB_PATH - name: Test all (non SuiteSparse and Frostt) tests with pytest From 58c43a2e170f5fff02b3236753f9b87b4130a16c Mon Sep 17 00:00:00 2001 From: Bo Wun Cheng Date: Wed, 13 Dec 2023 23:20:46 -0800 Subject: [PATCH 17/20] add peak and lassen to the exclude list when running flake8 --- .github/workflows/python-package-conda.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml index 0085bde0..74be8cd4 100644 --- a/.github/workflows/python-package-conda.yml +++ b/.github/workflows/python-package-conda.yml @@ -27,10 +27,8 @@ jobs: pip install -r requirements.txt git clone https://github.com/cdonovick/peak.git pip install -e peak - rm -rf peak git clone https://github.com/StanfordAHA/lassen.git pip install -e lassen - rm -rf lassen pip install -e . echo $VIRTUAL_ENV/bin >> $GITHUB_PATH - name: Test all (non SuiteSparse and Frostt) tests with pytest @@ -55,7 +53,7 @@ jobs: flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics --exclude venv - name: Python style checking flake8 run: | - flake8 . --count --select=E,W --statistics --ignore=W503,W504 --max-line-length=127 --exclude venv + flake8 . --count --select=E,W --statistics --ignore=W503,W504 --max-line-length=127 --exclude venv peak lassen - name: Check SAM Simulator generating script run: | make tests From 55202c5d90f77666e87399af6d17ef3fd784cf95 Mon Sep 17 00:00:00 2001 From: Bo Wun Cheng Date: Wed, 13 Dec 2023 23:24:59 -0800 Subject: [PATCH 18/20] add peak and lassen to the exclude list when running flake8 --- .github/workflows/python-package-conda.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml index 74be8cd4..2674afdd 100644 --- a/.github/workflows/python-package-conda.yml +++ b/.github/workflows/python-package-conda.yml @@ -48,9 +48,9 @@ jobs: run: | conda install flake8 # stop the build if there are Python syntax errors or undefined names - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics --exclude venv + flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics --exclude venv peak lassen # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics --exclude venv + flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics --exclude venv peak lassen - name: Python style checking flake8 run: | flake8 . --count --select=E,W --statistics --ignore=W503,W504 --max-line-length=127 --exclude venv peak lassen From 6442eb3b7491ce57db8200508b2fde340f2af7c2 Mon Sep 17 00:00:00 2001 From: Bo Wun Cheng Date: Wed, 13 Dec 2023 23:29:07 -0800 Subject: [PATCH 19/20] fix syntax error in the --exclude argument of flake8 --- .github/workflows/python-package-conda.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/python-package-conda.yml b/.github/workflows/python-package-conda.yml index 2674afdd..563cad6c 100644 --- a/.github/workflows/python-package-conda.yml +++ b/.github/workflows/python-package-conda.yml @@ -48,12 +48,12 @@ jobs: run: | conda install flake8 # stop the build if there are Python syntax errors or undefined names - flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics --exclude venv peak lassen + flake8 . 
--count --select=E9,F63,F7,F82 --show-source --statistics --exclude venv,peak,lassen
        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
-        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics --exclude venv,peak,lassen
+        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics --exclude venv,peak,lassen
    - name: Python style checking flake8
      run: |
-        flake8 . --count --select=E,W --statistics --ignore=W503,W504 --max-line-length=127 --exclude venv,peak,lassen
+        flake8 . --count --select=E,W --statistics --ignore=W503,W504 --max-line-length=127 --exclude venv,peak,lassen
    - name: Check SAM Simulator generating script
      run: |
        make tests

From e01e93fa03b460c38a16c6ae5938a1a778861f99 Mon Sep 17 00:00:00 2001
From: Bo Wun Cheng
Date: Wed, 13 Dec 2023 23:40:56 -0800
Subject: [PATCH 20/20] fix code style

---
 sam/onyx/generate_matrices.py          | 4 ++--
 sam/onyx/hw_nodes/compute_node.py      | 4 ++--
 sam/onyx/hw_nodes/read_scanner_node.py | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/sam/onyx/generate_matrices.py b/sam/onyx/generate_matrices.py
index 34f4da93..add3d29d 100644
--- a/sam/onyx/generate_matrices.py
+++ b/sam/onyx/generate_matrices.py
@@ -70,7 +70,7 @@ def _create_matrix(self, value_cap=int(math.pow(2, 8)) - 1):
         if not self.use_fp:
             self.array = self.array.astype(int)
         else:
-            # convert to bfloat16 by truncating the trailing fraction bits 
+            # convert to bfloat16 by truncating the trailing fraction bits
             # converting it to floating point number
             for idx, x in numpy.ndenumerate(self.array):
                 bfval = bfbin2float(float2bfbin(x))
@@ -279,7 +279,7 @@ def write_array(self, str_list, name, dump_dir=None, dump_hex=False, is_val=Fals
             if not is_val:
                 data = int(item)
             if dump_hex:
-                if not type(data) == numpy.float32:
+                if not isinstance(data, numpy.float32):
                     wr_file.write(f"{data:04X}\n")
                 else:
                     # converting result to bf16 hexadecimal representation
diff --git a/sam/onyx/hw_nodes/compute_node.py b/sam/onyx/hw_nodes/compute_node.py
index eac07348..9c6af1e3 100644
--- a/sam/onyx/hw_nodes/compute_node.py
+++ b/sam/onyx/hw_nodes/compute_node.py
@@ -127,7 +127,7 @@ def connect(self, other, edge, kwargs=None):
                     other_conn = 0
                 elif 'exp' in comment:
                     other_conn = 1
-                else: 
+                else:
                     assert 0, "edge connected to faddiexp has to have comment specified to either 'exp' or 'fp'"
             new_conns = {
                 f'pe_to_pe_{other_conn}': [
@@ -202,7 +202,7 @@ def configure(self, attributes):
             # the b operand of the op is a constant
             rb_const = attributes["rb_const"].strip('"')
             if "." in rb_const:
-                # constant is a floating point 
+                # constant is a floating point
                 rb_const = float(rb_const)
                 rb_const = int(float2bfbin(rb_const), 2)
             else:
diff --git a/sam/onyx/hw_nodes/read_scanner_node.py b/sam/onyx/hw_nodes/read_scanner_node.py
index 72afd45e..b568fe24 100644
--- a/sam/onyx/hw_nodes/read_scanner_node.py
+++ b/sam/onyx/hw_nodes/read_scanner_node.py
@@ -211,7 +211,7 @@ def connect(self, other, edge, kwargs=None):
                     compute_conn = 0
                 elif 'exp' in comment:
                     compute_conn = 1
-                else: 
+                else:
                     assert 0, "edge connected to faddiexp has to have comment specified to either 'exp' or 'fp'"
             new_conns = {
                 f'rd_scan_to_compute_{compute_conn}': [