From 24e19f9f62a55783387f645427cf0e7631baabd6 Mon Sep 17 00:00:00 2001 From: Miklos Homolya Date: Mon, 7 Nov 2016 10:52:13 +0000 Subject: [PATCH 1/2] remove flatten= option --- pyop2/base.py | 31 ++--- pyop2/fusion/extended.py | 5 +- pyop2/petsc_base.py | 8 +- pyop2/pyparloop.py | 2 - pyop2/sequential.py | 220 ++++++++---------------------------- test/unit/test_extrusion.py | 8 +- 6 files changed, 62 insertions(+), 212 deletions(-) diff --git a/pyop2/base.py b/pyop2/base.py index d8a7ec7ec..4ca458e9a 100644 --- a/pyop2/base.py +++ b/pyop2/base.py @@ -259,7 +259,7 @@ class Arg(object): Instead, use the call syntax on the :class:`DataCarrier`. """ - def __init__(self, data=None, map=None, idx=None, access=None, flatten=False): + def __init__(self, data=None, map=None, idx=None, access=None): """ :param data: A data-carrying object, either :class:`Dat` or class:`Mat` :param map: A :class:`Map` to access this :class:`Arg` or the default @@ -269,9 +269,6 @@ def __init__(self, data=None, map=None, idx=None, access=None, flatten=False): given component of the mapping or the default to use all components of the mapping. :param access: An access descriptor of type :class:`Access` - :param flatten: Treat the data dimensions of this :class:`Arg` as flat - s.t. the kernel is passed a flat vector of length - ``map.arity * data.dataset.cdim``. Checks that: @@ -284,7 +281,6 @@ def __init__(self, data=None, map=None, idx=None, access=None, flatten=False): self._map = map self._idx = idx self._access = access - self._flatten = flatten self._in_flight = False # some kind of comms in flight for this arg # Check arguments for consistency @@ -300,18 +296,10 @@ def __init__(self, data=None, map=None, idx=None, access=None, flatten=False): "To set of %s doesn't match the set of %s." 
% (map, data)) # Determine the iteration space extents, if any - if self._is_mat and flatten: - rdims = tuple(d.cdim for d in data.sparsity.dsets[0]) - cdims = tuple(d.cdim for d in data.sparsity.dsets[1]) - self._block_shape = tuple(tuple((mr.arity * dr, mc.arity * dc) - for mc, dc in zip(map[1], cdims)) - for mr, dr in zip(map[0], rdims)) - elif self._is_mat: + if self._is_mat: self._block_shape = tuple(tuple((mr.arity, mc.arity) for mc in map[1]) for mr in map[0]) - elif self._uses_itspace and flatten: - self._block_shape = tuple(((m.arity * d.cdim,),) for m, d in zip(map, data)) elif self._uses_itspace: self._block_shape = tuple(((m.arity,),) for m in map) else: @@ -1814,13 +1802,13 @@ def __init__(self, dataset, data=None, dtype=None, name=None, self._recv_buf = {} @validate_in(('access', _modes, ModeValueError)) - def __call__(self, access, path=None, flatten=False): + def __call__(self, access, path=None): if isinstance(path, _MapArg): return _make_object('Arg', data=self, map=path.map, idx=path.idx, - access=access, flatten=flatten) + access=access) if configuration["type_check"] and path and path.toset != self.dataset.set: raise MapValueError("To Set of Map does not match Set of Dat.") - return _make_object('Arg', data=self, map=path, access=access, flatten=flatten) + return _make_object('Arg', data=self, map=path, access=access) def __getitem__(self, idx): """Return self if ``idx`` is 0, raise an error otherwise.""" @@ -2634,10 +2622,7 @@ def __init__(self, dim, data=None, dtype=None, name=None, comm=None): Global._globalcount += 1 @validate_in(('access', _modes, ModeValueError)) - def __call__(self, access, path=None, flatten=False): - """Note that the flatten argument is only passed in order to - have the same interface as :class:`Dat`. Its value is - ignored.""" + def __call__(self, access, path=None): return _make_object('Arg', data=self, access=access) def __iter__(self): @@ -3636,14 +3621,14 @@ def __init__(self, sparsity, dtype=None, name=None): Mat._globalcount += 1 @validate_in(('access', _modes, ModeValueError)) - def __call__(self, access, path, flatten=False): + def __call__(self, access, path): path = as_tuple(path, _MapArg, 2) path_maps = tuple(arg and arg.map for arg in path) path_idxs = tuple(arg and arg.idx for arg in path) if configuration["type_check"] and tuple(path_maps) not in self.sparsity: raise MapValueError("Path maps not in sparsity maps") return _make_object('Arg', data=self, map=path_maps, access=access, - idx=path_idxs, flatten=flatten) + idx=path_idxs) def assemble(self): """Finalise this :class:`Mat` ready for use. diff --git a/pyop2/fusion/extended.py b/pyop2/fusion/extended.py index f4591c024..026dc5533 100644 --- a/pyop2/fusion/extended.py +++ b/pyop2/fusion/extended.py @@ -72,7 +72,7 @@ def __init__(self, arg, gather=None, c_index=False): :arg c_index: if True, will provide the kernel with the iteration index of this Arg's set. Otherwise, code generation is unaffected. 
""" - super(FusionArg, self).__init__(arg.data, arg.map, arg.idx, arg.access, arg._flatten) + super(FusionArg, self).__init__(arg.data, arg.map, arg.idx, arg.access) self.gather = gather or arg.gather self.c_index = c_index or arg.c_index @@ -83,11 +83,10 @@ def c_map_name(self, i, j, fromvector=False): def c_vec_dec(self, is_facet=False): if self.gather == 'onlymap': facet_mult = 2 if is_facet else 1 - cdim = self.data.cdim if self._flatten else 1 return "%(type)s %(vec_name)s[%(arity)s];\n" % \ {'type': self.ctype, 'vec_name': self.c_vec_name(), - 'arity': self.map.arity * cdim * facet_mult} + 'arity': self.map.arity * facet_mult} else: return super(FusionArg, self).c_vec_dec(is_facet) diff --git a/pyop2/petsc_base.py b/pyop2/petsc_base.py index 0734434b7..f2fadba06 100644 --- a/pyop2/petsc_base.py +++ b/pyop2/petsc_base.py @@ -778,23 +778,23 @@ def _init_global_block(self): mat = _DatMat(self.sparsity) self.handle = mat - def __call__(self, access, path, flatten=False): + def __call__(self, access, path): """Override the parent __call__ method in order to special-case global blocks in matrices.""" try: # Usual case - return super(Mat, self).__call__(access, path, flatten) + return super(Mat, self).__call__(access, path) except TypeError: # One of the path entries was not an Arg. if path == (None, None): return _make_object('Arg', data=self.handle.getPythonContext().global_, - access=access, flatten=flatten) + access=access) elif None in path: thispath = path[0] or path[1] return _make_object('Arg', data=self.handle.getPythonContext().dat, map=thispath.map, idx=thispath.idx, - access=access, flatten=flatten) + access=access) else: raise diff --git a/pyop2/pyparloop.py b/pyop2/pyparloop.py index 30d87542b..eed2d41ae 100644 --- a/pyop2/pyparloop.py +++ b/pyop2/pyparloop.py @@ -157,8 +157,6 @@ def _compute(self, part, *arglist): else: arg.data._data[arg.map.values_with_halo[idx, arg.idx:arg.idx+1]] = tmp[:] elif arg._is_mat: - if arg._flatten: - raise NotImplementedError # Need to sort out the permutation. 
if arg.access is base.INC: arg.data.addto_values(arg.map[0].values_with_halo[idx], arg.map[1].values_with_halo[idx], diff --git a/pyop2/sequential.py b/pyop2/sequential.py index a572a5200..0e00cd47a 100644 --- a/pyop2/sequential.py +++ b/pyop2/sequential.py @@ -114,12 +114,11 @@ def c_wrapper_arg(self): def c_vec_dec(self, is_facet=False): facet_mult = 2 if is_facet else 1 - cdim = self.data.cdim if self._flatten else 1 if self.map is not None: return "%(type)s *%(vec_name)s[%(arity)s];\n" % \ {'type': self.ctype, 'vec_name': self.c_vec_name(), - 'arity': self.map.arity * cdim * facet_mult} + 'arity': self.map.arity * facet_mult} else: return "%(type)s *%(vec_name)s;\n" % \ {'type': self.ctype, @@ -159,7 +158,7 @@ def c_ind_data_xtr(self, idx, i, j=0): {'name': self.c_arg_name(i), 'map_name': self.c_map_name(i, 0), 'idx': idx, - 'dim': 1 if self._flatten else str(self.data[i].cdim), + 'dim': str(self.data[i].cdim), 'off': ' + %d' % j if j else ''} def c_kernel_arg_name(self, i, j): @@ -188,12 +187,6 @@ def c_kernel_arg(self, count, i=0, j=0, shape=(0,), layers=1): else: if self.data is not None and self.data.dataset._extruded: return self.c_ind_data_xtr("i_%d" % self.idx.index, i) - elif self._flatten: - return "%(name)s + %(map_name)s[i * %(arity)s + i_0 %% %(arity)d] * %(dim)s + (i_0 / %(arity)d)" % \ - {'name': self.c_arg_name(), - 'map_name': self.c_map_name(0, i), - 'arity': self.map.arity, - 'dim': self.data[i].cdim} else: return self.c_ind_data("i_%d" % self.idx.index, i) elif self._is_indirect: @@ -219,45 +212,21 @@ def c_vec_init(self, is_top, is_facet=False): vec_idx = 0 for i, (m, d) in enumerate(zip(self.map, self.data)): is_top = is_top_init and m.iterset._extruded - if self._flatten: - for k in range(d.cdim): - for idx in range(m.arity): - val.append("%(vec_name)s[%(idx)s] = %(data)s" % - {'vec_name': self.c_vec_name(), - 'idx': vec_idx, - 'data': self.c_ind_data(idx, i, k, is_top=is_top, - offset=m.offset[idx] if is_top else None)}) - vec_idx += 1 - # In the case of interior horizontal facets the map for the - # vertical does not exist so it has to be dynamically - # created by adding the offset to the map of the current - # cell. In this way the only map required is the one for - # the bottom layer of cells and the wrapper will make sure - # to stage in the data for the entire map spanning the facet. 
- if is_facet: - for idx in range(m.arity): - val.append("%(vec_name)s[%(idx)s] = %(data)s" % - {'vec_name': self.c_vec_name(), - 'idx': vec_idx, - 'data': self.c_ind_data(idx, i, k, is_top=is_top, - offset=m.offset[idx])}) - vec_idx += 1 - else: + for idx in range(m.arity): + val.append("%(vec_name)s[%(idx)s] = %(data)s" % + {'vec_name': self.c_vec_name(), + 'idx': vec_idx, + 'data': self.c_ind_data(idx, i, is_top=is_top, + offset=m.offset[idx] if is_top else None)}) + vec_idx += 1 + if is_facet: for idx in range(m.arity): val.append("%(vec_name)s[%(idx)s] = %(data)s" % {'vec_name': self.c_vec_name(), 'idx': vec_idx, 'data': self.c_ind_data(idx, i, is_top=is_top, - offset=m.offset[idx] if is_top else None)}) + offset=m.offset[idx])}) vec_idx += 1 - if is_facet: - for idx in range(m.arity): - val.append("%(vec_name)s[%(idx)s] = %(data)s" % - {'vec_name': self.c_vec_name(), - 'idx': vec_idx, - 'data': self.c_ind_data(idx, i, is_top=is_top, - offset=m.offset[idx])}) - vec_idx += 1 return ";\n".join(val) def c_addto(self, i, j, buf_name, tmp_name, tmp_decl, @@ -283,35 +252,6 @@ def c_addto(self, i, j, buf_name, tmp_name, tmp_decl, addto = 'MatSetValuesLocal' if self.data._is_vector_field: addto = 'MatSetValuesBlockedLocal' - if self._flatten: - idx = "[%(ridx)s][%(cidx)s]" - ret = [] - idx_l = idx % {'ridx': "%d*j + k" % rbs, - 'cidx': "%d*l + m" % cbs} - idx_r = idx % {'ridx': "j + %d*k" % nrows, - 'cidx': "l + %d*m" % ncols} - # Shuffle xxx yyy zzz into xyz xyz xyz - ret = [""" - %(tmp_decl)s; - for ( int j = 0; j < %(nrows)d; j++ ) { - for ( int k = 0; k < %(rbs)d; k++ ) { - for ( int l = 0; l < %(ncols)d; l++ ) { - for ( int m = 0; m < %(cbs)d; m++ ) { - %(tmp_name)s%(idx_l)s = %(buf_name)s%(idx_r)s; - } - } - } - }""" % {'nrows': nrows, - 'ncols': ncols, - 'rbs': rbs, - 'cbs': cbs, - 'idx_l': idx_l, - 'idx_r': idx_r, - 'buf_name': buf_name, - 'tmp_decl': tmp_decl, - 'tmp_name': tmp_name}] - addto_name = tmp_name - rmap, cmap = maps rdim, cdim = self.data.dims[i][j] if rmap.vector_index is not None or cmap.vector_index is not None: @@ -406,9 +346,6 @@ def c_zero_tmp(self, i, j): return "%(name)s%(idx)s = (%(t)s)0" % \ {'name': self.c_kernel_arg_name(i, j), 't': t, 'idx': idx} elif self.data[i, j]._is_vector_field: - if self._flatten: - return "%(name)s[0][0] = (%(t)s)0" % \ - {'name': self.c_kernel_arg_name(i, j), 't': t} size = np.prod(self.data[i, j].dims) return "memset(%(name)s, 0, sizeof(%(t)s) * %(size)s)" % \ {'name': self.c_kernel_arg_name(i, j), 't': t, 'size': size} @@ -421,7 +358,14 @@ def c_add_offset(self, is_facet=False): val = [] vec_idx = 0 for i, (m, d) in enumerate(zip(self.map, self.data)): - for k in range(d.cdim if self._flatten else 1): + for idx in range(m.arity): + val.append("%(name)s[%(j)d] += %(offset)d * %(dim)s;" % + {'name': self.c_vec_name(), + 'j': vec_idx, + 'offset': m.offset[idx], + 'dim': d.cdim}) + vec_idx += 1 + if is_facet: for idx in range(m.arity): val.append("%(name)s[%(j)d] += %(offset)d * %(dim)s;" % {'name': self.c_vec_name(), @@ -429,14 +373,6 @@ def c_add_offset(self, is_facet=False): 'offset': m.offset[idx], 'dim': d.cdim}) vec_idx += 1 - if is_facet: - for idx in range(m.arity): - val.append("%(name)s[%(j)d] += %(offset)d * %(dim)s;" % - {'name': self.c_vec_name(), - 'j': vec_idx, - 'offset': m.offset[idx], - 'dim': d.cdim}) - vec_idx += 1 return '\n'.join(val)+'\n' # New globals generation which avoids false sharing. 
@@ -482,8 +418,6 @@ def c_map_decl(self, is_facet=False): for i, (map, dset) in enumerate(zip(as_tuple(self.map, Map), dsets)): for j, (m, d) in enumerate(zip(map, dset)): dim = m.arity - if self._is_dat and self._flatten: - dim *= d.cdim if is_facet: dim *= 2 val.append("int xtr_%(name)s[%(dim)s];" % @@ -499,42 +433,20 @@ def c_map_init(self, is_top=False, is_facet=False): for i, (map, dset) in enumerate(zip(as_tuple(self.map, Map), dsets)): for j, (m, d) in enumerate(zip(map, dset)): for idx in range(m.arity): - if self._is_dat and self._flatten and d.cdim > 1: - for k in range(d.cdim): - val.append("xtr_%(name)s[%(ind_flat)s] = %(dat_dim)s * (*(%(name)s + i * %(dim)s + %(ind)s)%(off_top)s)%(offset)s;" % - {'name': self.c_map_name(i, j), - 'dim': m.arity, - 'ind': idx, - 'dat_dim': d.cdim, - 'ind_flat': (2 if is_facet else 1) * m.arity * k + idx, - 'offset': ' + '+str(k) if k > 0 else '', - 'off_top': ' + start_layer * '+str(m.offset[idx]) if is_top else ''}) - else: - val.append("xtr_%(name)s[%(ind)s] = *(%(name)s + i * %(dim)s + %(ind)s)%(off_top)s;" % - {'name': self.c_map_name(i, j), - 'dim': m.arity, - 'ind': idx, - 'off_top': ' + start_layer * '+str(m.offset[idx]) if is_top else ''}) + val.append("xtr_%(name)s[%(ind)s] = *(%(name)s + i * %(dim)s + %(ind)s)%(off_top)s;" % + {'name': self.c_map_name(i, j), + 'dim': m.arity, + 'ind': idx, + 'off_top': ' + start_layer * '+str(m.offset[idx]) if is_top else ''}) if is_facet: for idx in range(m.arity): - if self._is_dat and self._flatten and d.cdim > 1: - for k in range(d.cdim): - val.append("xtr_%(name)s[%(ind_flat)s] = %(dat_dim)s * (*(%(name)s + i * %(dim)s + %(ind)s)%(off)s)%(offset)s;" % - {'name': self.c_map_name(i, j), - 'dim': m.arity, - 'ind': idx, - 'dat_dim': d.cdim, - 'ind_flat': m.arity * (k * 2 + 1) + idx, - 'offset': ' + '+str(k) if k > 0 else '', - 'off': ' + ' + str(m.offset[idx])}) - else: - val.append("xtr_%(name)s[%(ind)s] = *(%(name)s + i * %(dim)s + %(ind_zero)s)%(off_top)s%(off)s;" % - {'name': self.c_map_name(i, j), - 'dim': m.arity, - 'ind': idx + m.arity, - 'ind_zero': idx, - 'off_top': ' + start_layer' if is_top else '', - 'off': ' + ' + str(m.offset[idx])}) + val.append("xtr_%(name)s[%(ind)s] = *(%(name)s + i * %(dim)s + %(ind_zero)s)%(off_top)s%(off)s;" % + {'name': self.c_map_name(i, j), + 'dim': m.arity, + 'ind': idx + m.arity, + 'ind_zero': idx, + 'off_top': ' + start_layer' if is_top else '', + 'off': ' + ' + str(m.offset[idx])}) return '\n'.join(val)+'\n' def c_map_bcs(self, sign, is_facet): @@ -611,32 +523,16 @@ def c_add_offset_map(self, is_facet=False): continue for j, (m, d) in enumerate(zip(map, dset)): for idx in range(m.arity): - if self._is_dat and self._flatten and d.cdim > 1: - for k in range(d.cdim): - val.append("xtr_%(name)s[%(ind_flat)s] += %(off)d * %(dim)s;" % - {'name': self.c_map_name(i, j), - 'off': m.offset[idx], - 'ind_flat': m.arity * k + idx, - 'dim': d.cdim}) - else: + val.append("xtr_%(name)s[%(ind)s] += %(off)d;" % + {'name': self.c_map_name(i, j), + 'off': m.offset[idx], + 'ind': idx}) + if is_facet: + for idx in range(m.arity): val.append("xtr_%(name)s[%(ind)s] += %(off)d;" % {'name': self.c_map_name(i, j), 'off': m.offset[idx], - 'ind': idx}) - if is_facet: - for idx in range(m.arity): - if self._is_dat and self._flatten and d.cdim > 1: - for k in range(d.cdim): - val.append("xtr_%(name)s[%(ind_flat)s] += %(off)d * %(dim)s;" % - {'name': self.c_map_name(i, j), - 'off': m.offset[idx], - 'ind_flat': m.arity * (k + d.cdim) + idx, - 'dim': d.cdim}) - else: - 
val.append("xtr_%(name)s[%(ind)s] += %(off)d;" % - {'name': self.c_map_name(i, j), - 'off': m.offset[idx], - 'ind': m.arity + idx}) + 'ind': m.arity + idx}) return '\n'.join(val)+'\n' def c_buffer_decl(self, size, idx, buf_name, is_facet=False, init=True): @@ -657,7 +553,7 @@ def c_buffer_decl(self, size, idx, buf_name, is_facet=False, init=True): "init": init_expr} def c_buffer_gather(self, size, idx, buf_name): - dim = 1 if self._flatten else self.data.cdim + dim = self.data.cdim return ";\n".join(["%(name)s[i_0*%(dim)d%(ofs)s] = *(%(ind)s%(ofs)s);\n" % {"name": buf_name, "dim": dim, @@ -689,18 +585,6 @@ def c_buffer_scatter_offset(self, count, i, j, ofs_name): 'dim': self.data.split[i].cdim } - def c_buffer_scatter_vec_flatten(self, count, i, j, mxofs, buf_name, ofs_name, loop_size): - dim = self.data.split[i].cdim - return ";\n".join(["%(name)s[%(ofs_name)s%(nfofs)s] %(op)s %(buf_name)s[i_0%(buf_ofs)s%(mxofs)s]" % - {"name": self.c_arg_name(), - "op": "=" if self.access == WRITE else "+=", - "buf_name": buf_name, - "ofs_name": ofs_name, - "nfofs": " + %d" % o, - "buf_ofs": " + %d" % (o*loop_size,), - "mxofs": " + %d" % (mxofs[0] * dim) if mxofs else ""} - for o in range(dim)]) - class JITModule(base.JITModule): @@ -1063,17 +947,11 @@ def extrusion_loop(): if not arg._is_mat: # Readjust size to take into account the size of a vector space _dat_size = (arg.data.cdim,) - # Only adjust size if not flattening (in which case the buffer is extents*dat.dim) - if not arg._flatten: - _buf_size = [sum([e*d for e, d in zip(_buf_size, _dat_size)])] - _loop_size = [_buf_size[i]//_dat_size[i] for i in range(len(_buf_size))] - else: - _buf_size = [sum(_buf_size)] - _loop_size = _buf_size + _buf_size = [sum([e*d for e, d in zip(_buf_size, _dat_size)])] + _loop_size = [_buf_size[i]//_dat_size[i] for i in range(len(_buf_size))] else: - if not arg._flatten: - _dat_size = arg.data.dims[0][0] # TODO: [0][0] ? - _buf_size = [e*d for e, d in zip(_buf_size, _dat_size)] + _dat_size = arg.data.dims[0][0] # TODO: [0][0] ? 
+ _buf_size = [e*d for e, d in zip(_buf_size, _dat_size)] _buf_decl[arg] = arg.c_buffer_decl(_buf_size, count, _buf_name[arg], is_facet=is_facet) _tmp_decl[arg] = arg.c_buffer_decl(_buf_size, count, _tmp_name[arg], is_facet=is_facet, init=False) @@ -1107,21 +985,11 @@ def itset_loop_body(i, j, shape, offsets, is_facet=False): raise NotImplementedError elif arg._is_mat: continue - elif arg._is_dat and not arg._flatten: + elif arg._is_dat: loop_size = shape[0]*mult _itspace_loops, _itspace_loop_close = itspace_loop(0, loop_size), '}' _scatter_stmts = arg.c_buffer_scatter_vec(count, i, j, offsets, _buf_name[arg]) _buf_offset, _buf_offset_decl = '', '' - elif arg._is_dat: - dim = arg.data.split[i].cdim - loop_size = shape[0]*mult//dim - _itspace_loops, _itspace_loop_close = itspace_loop(0, loop_size), '}' - _buf_offset_name = 'offset_%d[%s]' % (count, '%s') - _buf_offset_decl = 'int %s' % _buf_offset_name % loop_size - _buf_offset_array = _buf_offset_name % 'i_0' - _buf_offset = '%s;' % arg.c_buffer_scatter_offset(count, i, j, _buf_offset_array) - _scatter_stmts = arg.c_buffer_scatter_vec_flatten(count, i, j, offsets, _buf_name[arg], - _buf_offset_array, loop_size) else: raise NotImplementedError _buf_scatter[arg] = template_scatter % { diff --git a/test/unit/test_extrusion.py b/test/unit/test_extrusion.py index 9df325158..5e4c9caa9 100644 --- a/test/unit/test_extrusion.py +++ b/test/unit/test_extrusion.py @@ -481,8 +481,8 @@ def test_extruded_assemble_mat( iterset, xtr_nodes, 1, vertex_to_xtr_coords, "v2xtr_layer", v2xtr_layer_offset) op2.par_loop(extrusion_kernel, iterset, - coords_xtr(op2.INC, map_xtr, flatten=True), - coords(op2.READ, map_2d, flatten=True), + coords_xtr(op2.INC, map_xtr), + coords(op2.READ, map_2d), layer(op2.READ, layer_xtr)) # Assemble the main matrix. 
@@ -499,8 +499,8 @@ def test_extruded_assemble_mat( xtr_f = op2.Dat(d_lnodes_xtr, xtr_f_vals, numpy.int32, "xtr_f") op2.par_loop(vol_comp_rhs, xtr_elements, - xtr_b(op2.INC, xtr_elem_node[op2.i[0]], flatten=True), - coords_xtr(op2.READ, xtr_elem_node, flatten=True), + xtr_b(op2.INC, xtr_elem_node[op2.i[0]]), + coords_xtr(op2.READ, xtr_elem_node), xtr_f(op2.READ, xtr_elem_node)) assert_allclose(sum(xtr_b.data), 6.0, eps) From 404f587cf12b41c3c9ba6e4d401bc1220466c214 Mon Sep 17 00:00:00 2001 From: Miklos Homolya Date: Mon, 12 Dec 2016 12:51:23 +0000 Subject: [PATCH 2/2] remove some more dead code --- pyop2/sequential.py | 50 +-------------------------------------------- 1 file changed, 1 insertion(+), 49 deletions(-) diff --git a/pyop2/sequential.py b/pyop2/sequential.py index 0e00cd47a..cacf92fbd 100644 --- a/pyop2/sequential.py +++ b/pyop2/sequential.py @@ -54,7 +54,7 @@ from pyop2.petsc_base import Global, GlobalDataSet # noqa: F401 from pyop2.petsc_base import Dat, MixedDat, Mat # noqa: F401 from pyop2.configuration import configuration -from pyop2.exceptions import * +from pyop2.exceptions import * # noqa: F401 from pyop2.mpi import collective from pyop2.profiling import timed_region from pyop2.utils import as_tuple, cached_property, strip, get_petsc_dir @@ -167,9 +167,6 @@ def c_kernel_arg_name(self, i, j): def c_global_reduction_name(self, count=None): return self.c_arg_name() - def c_local_tensor_name(self, i, j): - return self.c_kernel_arg_name(i, j) - def c_kernel_arg(self, count, i=0, j=0, shape=(0,), layers=1): if self._is_dat_view and not self._is_direct: raise NotImplementedError("Indirect DatView not implemented") @@ -332,26 +329,6 @@ def c_addto(self, i, j, buf_name, tmp_name, tmp_decl, ret = " "*16 + "{\n" + "\n".join(ret) + "\n" + " "*16 + "}" return ret - def c_local_tensor_dec(self, extents, i, j): - if self._is_mat: - size = 1 - else: - size = self.data.split[i].cdim - return tuple([d * size for d in extents]) - - def c_zero_tmp(self, i, j): - t = self.ctype - if self.data[i, j]._is_scalar_field: - idx = ''.join(["[i_%d]" % ix for ix in range(len(self.data.dims))]) - return "%(name)s%(idx)s = (%(t)s)0" % \ - {'name': self.c_kernel_arg_name(i, j), 't': t, 'idx': idx} - elif self.data[i, j]._is_vector_field: - size = np.prod(self.data[i, j].dims) - return "memset(%(name)s, 0, sizeof(%(t)s) * %(size)s)" % \ - {'name': self.c_kernel_arg_name(i, j), 't': t, 'size': size} - else: - raise RuntimeError("Don't know how to zero temp array for %s" % self) - def c_add_offset(self, is_facet=False): if not self.map.iterset._extruded: return "" @@ -571,20 +548,6 @@ def c_buffer_scatter_vec(self, count, i, j, mxofs, buf_name): "mxofs": " + %d" % (mxofs[0] * dim) if mxofs else ""} for o in range(dim)]) - def c_buffer_scatter_offset(self, count, i, j, ofs_name): - if self.data.dataset._extruded: - return '%(ofs_name)s = %(map_name)s[i_0]' % { - 'ofs_name': ofs_name, - 'map_name': 'xtr_%s' % self.c_map_name(0, i), - } - else: - return '%(ofs_name)s = %(map_name)s[i * %(arity)d + i_0] * %(dim)s' % { - 'ofs_name': ofs_name, - 'map_name': self.c_map_name(0, i), - 'arity': self.map.arity, - 'dim': self.data.split[i].cdim - } - class JITModule(base.JITModule): @@ -856,17 +819,6 @@ def wrapper_snippets(itspace, args, def itspace_loop(i, d): return "for (int i_%d=0; i_%d<%d; ++i_%d) {" % (i, i, d, i) - def c_const_arg(c): - return '%s *%s_' % (c.ctype, c.name) - - def c_const_init(c): - d = {'name': c.name, - 'type': c.ctype} - if c.cdim == 1: - return '%(name)s = *%(name)s_' % d - tmp = 
'%(name)s[%%(i)s] = %(name)s_[%%(i)s]' % d - return ';\n'.join([tmp % {'i': i} for i in range(c.cdim)]) - def extrusion_loop(): if direct: return "{"
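
For reference, a minimal sketch of the call-site change this series implies, taken from the test_extrusion.py hunk above (the kernel, dat and map names come from that test and are illustrative only): Dat, Mat and Global __call__ no longer accept a flatten keyword, so callers simply drop it.

    # before this series: coords_xtr(op2.INC, map_xtr, flatten=True)
    # after this series: the keyword argument no longer exists
    op2.par_loop(extrusion_kernel, iterset,
                 coords_xtr(op2.INC, map_xtr),
                 coords(op2.READ, map_2d),
                 layer(op2.READ, layer_xtr))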