diff --git a/amitools/binfmt/BinFmt.py b/amitools/binfmt/BinFmt.py index fae8ed5e..84183c10 100644 --- a/amitools/binfmt/BinFmt.py +++ b/amitools/binfmt/BinFmt.py @@ -1,53 +1,53 @@ - - from .elf.BinFmtELF import BinFmtELF from .hunk.BinFmtHunk import BinFmtHunk + class BinFmt: - def __init__(self): - self.formats = [BinFmtHunk(), BinFmtELF()] - - def get_format(self, path): - """get instance of BinFmt loader or None""" - with open(path, "rb") as f: - return self.get_format_fobj(f) - - def get_format_fobj(self, fobj): - """get instance of BinFmt loader or None""" - for f in self.formats: - if f.is_image_fobj(fobj): - return f - return None - - def is_image(self, path): - """check if a given file is a supported binary file""" - with open(path, "rb") as f: - return self.is_image_fobj(f) - - def is_image_fobj(self, fobj): - """check if a given file is a supported binary file""" - f = self.get_format_fobj(fobj) - return f is not None - - def load_image(self, path): - """load a binary file and return a BinImage. unknown format returns None""" - with open(path, "rb") as f: - return self.load_image_fobj(f) - - def load_image_fobj(self, fobj): - """load a binary file and return a BinImage. 
unknown format returns None""" - f = self.get_format_fobj(fobj) - if f is not None: - return f.load_image_fobj(fobj) - else: - return None + def __init__(self): + self.formats = [BinFmtHunk(), BinFmtELF()] + + def get_format(self, path): + """get instance of BinFmt loader or None""" + with open(path, "rb") as f: + return self.get_format_fobj(f) + + def get_format_fobj(self, fobj): + """get instance of BinFmt loader or None""" + for f in self.formats: + if f.is_image_fobj(fobj): + return f + return None + + def is_image(self, path): + """check if a given file is a supported binary file""" + with open(path, "rb") as f: + return self.is_image_fobj(f) + + def is_image_fobj(self, fobj): + """check if a given file is a supported binary file""" + f = self.get_format_fobj(fobj) + return f is not None + + def load_image(self, path): + """load a binary file and return a BinImage. unknown format returns None""" + with open(path, "rb") as f: + return self.load_image_fobj(f) + + def load_image_fobj(self, fobj): + """load a binary file and return a BinImage. 
unknown format returns None""" + f = self.get_format_fobj(fobj) + if f is not None: + return f.load_image_fobj(fobj) + else: + return None # mini test -if __name__ == '__main__': - import sys - bf = BinFmt() - for a in sys.argv[1:]: - ok = bf.is_image(a) - bi = bf.load_image(a) - print(a, ok, str(bi)) +if __name__ == "__main__": + import sys + + bf = BinFmt() + for a in sys.argv[1:]: + ok = bf.is_image(a) + bi = bf.load_image(a) + print(a, ok, str(bi)) diff --git a/amitools/binfmt/BinImage.py b/amitools/binfmt/BinImage.py index 247b795d..2796fa02 100644 --- a/amitools/binfmt/BinImage.py +++ b/amitools/binfmt/BinImage.py @@ -1,279 +1,282 @@ - SEGMENT_TYPE_CODE = 0 SEGMENT_TYPE_DATA = 1 SEGMENT_TYPE_BSS = 2 SEGMENT_FLAG_READ_ONLY = 1 -segment_type_names = [ - "CODE", "DATA", "BSS" -] +segment_type_names = ["CODE", "DATA", "BSS"] BIN_IMAGE_TYPE_HUNK = 0 BIN_IMAGE_TYPE_ELF = 1 -bin_image_type_names = [ - "hunk", "elf" -] +bin_image_type_names = ["hunk", "elf"] + class Reloc: - def __init__(self, offset, width=2, addend=0): - self.offset = offset - self.width = width - self.addend = addend + def __init__(self, offset, width=2, addend=0): + self.offset = offset + self.width = width + self.addend = addend - def get_offset(self): - return self.offset + def get_offset(self): + return self.offset - def get_width(self): - return self.width + def get_width(self): + return self.width - def get_addend(self): - return self.addend + def get_addend(self): + return self.addend class Relocations: - def __init__(self, to_seg): - self.to_seg = to_seg - self.entries = [] + def __init__(self, to_seg): + self.to_seg = to_seg + self.entries = [] - def add_reloc(self, reloc): - self.entries.append(reloc) + def add_reloc(self, reloc): + self.entries.append(reloc) - def get_relocs(self): - return self.entries + def get_relocs(self): + return self.entries class Symbol: - def __init__(self, offset, name, file_name=None): - self.offset = offset - self.name = name - self.file_name = file_name + 
def __init__(self, offset, name, file_name=None): + self.offset = offset + self.name = name + self.file_name = file_name - def get_offset(self): - return self.offset + def get_offset(self): + return self.offset - def get_name(self): - return self.name + def get_name(self): + return self.name - def get_file_name(self): - return self.file_name + def get_file_name(self): + return self.file_name class SymbolTable: - def __init__(self): - self.symbols = [] + def __init__(self): + self.symbols = [] - def add_symbol(self, symbol): - self.symbols.append(symbol) + def add_symbol(self, symbol): + self.symbols.append(symbol) - def get_symbols(self): - return self.symbols + def get_symbols(self): + return self.symbols class DebugLineEntry: - def __init__(self, offset, src_line, flags=0): - self.offset = offset - self.src_line = src_line - self.flags = flags - self.file_ = None + def __init__(self, offset, src_line, flags=0): + self.offset = offset + self.src_line = src_line + self.flags = flags + self.file_ = None - def get_offset(self): - return self.offset + def get_offset(self): + return self.offset - def get_src_line(self): - return self.src_line + def get_src_line(self): + return self.src_line - def get_flags(self): - return self.flags + def get_flags(self): + return self.flags - def get_file(self): - return self.file_ + def get_file(self): + return self.file_ class DebugLineFile: - def __init__(self, src_file, dir_name=None, base_offset=0): - self.src_file = src_file - self.dir_name = dir_name - self.base_offset = base_offset - self.entries = [] + def __init__(self, src_file, dir_name=None, base_offset=0): + self.src_file = src_file + self.dir_name = dir_name + self.base_offset = base_offset + self.entries = [] - def get_src_file(self): - return self.src_file + def get_src_file(self): + return self.src_file - def get_dir_name(self): - return self.dir_name + def get_dir_name(self): + return self.dir_name - def get_entries(self): - return self.entries + def 
get_entries(self): + return self.entries - def get_base_offset(self): - return self.base_offset + def get_base_offset(self): + return self.base_offset - def add_entry(self, e): - self.entries.append(e) - e.file_ = self + def add_entry(self, e): + self.entries.append(e) + e.file_ = self class DebugLine: - def __init__(self): - self.files = [] + def __init__(self): + self.files = [] - def add_file(self, src_file): - self.files.append(src_file) + def add_file(self, src_file): + self.files.append(src_file) - def get_files(self): - return self.files + def get_files(self): + return self.files class Segment: - def __init__(self, seg_type, size, data=None, flags=0): - self.seg_type = seg_type - self.size = size - self.data = data - self.flags = flags - self.relocs = {} - self.symtab = None - self.id = None - self.file_data = None - self.debug_line = None - - def __str__(self): - # relocs - relocs = [] - for to_seg in self.relocs: - r = self.relocs[to_seg] - relocs.append("(#%d:size=%d)" % (to_seg.id, len(r.entries))) - # symtab - if self.symtab is not None: - symtab = "symtab=#%d" % len(self.symtab.symbols) - else: - symtab = "" - # debug_line - if self.debug_line is not None: - dl_files = self.debug_line.get_files() - file_info = [] - for dl_file in dl_files: - n = len(dl_file.entries) - file_info.append("(%s:#%d)" % (dl_file.src_file, n)) - debug_line = "debug_line=" + ",".join(file_info) - else: - debug_line = "" - # summary - return "[#%d:%s:size=%d,flags=%d,%s,%s,%s]" % (self.id, - segment_type_names[self.seg_type], self.size, self.flags, - ",".join(relocs), symtab, debug_line) - - def get_type(self): - return self.seg_type - - def get_type_name(self): - return segment_type_names[self.seg_type] - - def get_size(self): - return self.size - - def get_data(self): - return self.data - - def add_reloc(self, to_seg, relocs): - self.relocs[to_seg] = relocs - - def get_reloc_to_segs(self): - keys = list(self.relocs.keys()) - return sorted(keys, key=lambda x: x.id) - - def 
get_reloc(self, to_seg): - if to_seg in self.relocs: - return self.relocs[to_seg] - else: - return None - - def set_symtab(self, symtab): - self.symtab = symtab - - def get_symtab(self): - return self.symtab - - def set_debug_line(self, debug_line): - self.debug_line = debug_line - - def get_debug_line(self): - return self.debug_line - - def set_file_data(self, file_data): - """set associated loaded binary file""" - self.file_data = file_data - - def get_file_data(self): - """get associated loaded binary file""" - return self.file_data - - def find_symbol(self, offset): - symtab = self.get_symtab() - if symtab is None: - return None - for symbol in symtab.get_symbols(): - off = symbol.get_offset() - if off == offset: - return symbol.get_name() - return None - - def find_reloc(self, offset, size): - to_segs = self.get_reloc_to_segs() - for to_seg in to_segs: - reloc = self.get_reloc(to_seg) - for r in reloc.get_relocs(): - off = r.get_offset() - if off >= offset and off <= (offset + size): - return r,to_seg,off - return None - - def find_debug_line(self, offset): - debug_line = self.debug_line - if debug_line is None: - return None - for df in debug_line.get_files(): - for e in df.get_entries(): - if e.get_offset() == offset: - return e - return None + def __init__(self, seg_type, size, data=None, flags=0): + self.seg_type = seg_type + self.size = size + self.data = data + self.flags = flags + self.relocs = {} + self.symtab = None + self.id = None + self.file_data = None + self.debug_line = None + + def __str__(self): + # relocs + relocs = [] + for to_seg in self.relocs: + r = self.relocs[to_seg] + relocs.append("(#%d:size=%d)" % (to_seg.id, len(r.entries))) + # symtab + if self.symtab is not None: + symtab = "symtab=#%d" % len(self.symtab.symbols) + else: + symtab = "" + # debug_line + if self.debug_line is not None: + dl_files = self.debug_line.get_files() + file_info = [] + for dl_file in dl_files: + n = len(dl_file.entries) + file_info.append("(%s:#%d)" % 
(dl_file.src_file, n)) + debug_line = "debug_line=" + ",".join(file_info) + else: + debug_line = "" + # summary + return "[#%d:%s:size=%d,flags=%d,%s,%s,%s]" % ( + self.id, + segment_type_names[self.seg_type], + self.size, + self.flags, + ",".join(relocs), + symtab, + debug_line, + ) + + def get_type(self): + return self.seg_type + + def get_type_name(self): + return segment_type_names[self.seg_type] + + def get_size(self): + return self.size + + def get_data(self): + return self.data + + def add_reloc(self, to_seg, relocs): + self.relocs[to_seg] = relocs + + def get_reloc_to_segs(self): + keys = list(self.relocs.keys()) + return sorted(keys, key=lambda x: x.id) + + def get_reloc(self, to_seg): + if to_seg in self.relocs: + return self.relocs[to_seg] + else: + return None + + def set_symtab(self, symtab): + self.symtab = symtab + + def get_symtab(self): + return self.symtab + + def set_debug_line(self, debug_line): + self.debug_line = debug_line + + def get_debug_line(self): + return self.debug_line + + def set_file_data(self, file_data): + """set associated loaded binary file""" + self.file_data = file_data + + def get_file_data(self): + """get associated loaded binary file""" + return self.file_data + + def find_symbol(self, offset): + symtab = self.get_symtab() + if symtab is None: + return None + for symbol in symtab.get_symbols(): + off = symbol.get_offset() + if off == offset: + return symbol.get_name() + return None + + def find_reloc(self, offset, size): + to_segs = self.get_reloc_to_segs() + for to_seg in to_segs: + reloc = self.get_reloc(to_seg) + for r in reloc.get_relocs(): + off = r.get_offset() + if off >= offset and off <= (offset + size): + return r, to_seg, off + return None + + def find_debug_line(self, offset): + debug_line = self.debug_line + if debug_line is None: + return None + for df in debug_line.get_files(): + for e in df.get_entries(): + if e.get_offset() == offset: + return e + return None class BinImage: - """A binary image contains all 
the segments of a program's binary image. + """A binary image contains all the segments of a program's binary image. """ - def __init__(self, file_type): - self.segments = [] - self.file_data = None - self.file_type = file_type - - def __str__(self): - return "<%s>" % ",".join(map(str,self.segments)) - - def get_size(self): - total_size = 0 - for seg in self.segments: - total_size += seg.get_size() - return total_size - - def add_segment(self, seg): - seg.id = len(self.segments) - self.segments.append(seg) - - def get_segments(self): - return self.segments - - def set_file_data(self, file_data): - """set associated loaded binary file""" - self.file_data = file_data - - def get_file_data(self): - """get associated loaded binary file""" - return self.file_data - - def get_segment_names(self): - names = [] - for seg in self.segments: - names.append(seg.get_type_name()) - return names + + def __init__(self, file_type): + self.segments = [] + self.file_data = None + self.file_type = file_type + + def __str__(self): + return "<%s>" % ",".join(map(str, self.segments)) + + def get_size(self): + total_size = 0 + for seg in self.segments: + total_size += seg.get_size() + return total_size + + def add_segment(self, seg): + seg.id = len(self.segments) + self.segments.append(seg) + + def get_segments(self): + return self.segments + + def set_file_data(self, file_data): + """set associated loaded binary file""" + self.file_data = file_data + + def get_file_data(self): + """get associated loaded binary file""" + return self.file_data + + def get_segment_names(self): + names = [] + for seg in self.segments: + names.append(seg.get_type_name()) + return names diff --git a/amitools/binfmt/Disassemble.py b/amitools/binfmt/Disassemble.py index 49d83ca0..b37f9118 100644 --- a/amitools/binfmt/Disassemble.py +++ b/amitools/binfmt/Disassemble.py @@ -1,77 +1,86 @@ from amitools.vamos.machine import DisAsm from .BinImage import * + class Disassemble: - """allows to disassemble code segments 
of a BinImage""" - def __init__(self, cpu='68000'): - self.disasm = DisAsm.create(cpu) + """allows to disassemble code segments of a BinImage""" + + def __init__(self, cpu="68000"): + self.disasm = DisAsm.create(cpu) - def _get_line_info(self, segment, addr, size): - infos = [] - # info about src line - d = segment.find_debug_line(addr) - if d is not None: - f = d.get_file() - infos.append("src %10s:%d [%s]" % (f.get_src_file(), - d.get_src_line(), - f.get_dir_name())) - # info about relocation - r = segment.find_reloc(addr, size) - if r is not None: - delta = r[2] - addr - infos.append("reloc +%02d: (#%02d + %08x)" % (delta, r[1].id, r[0].addend)) - return infos + def _get_line_info(self, segment, addr, size): + infos = [] + # info about src line + d = segment.find_debug_line(addr) + if d is not None: + f = d.get_file() + infos.append( + "src %10s:%d [%s]" + % (f.get_src_file(), d.get_src_line(), f.get_dir_name()) + ) + # info about relocation + r = segment.find_reloc(addr, size) + if r is not None: + delta = r[2] - addr + infos.append("reloc +%02d: (#%02d + %08x)" % (delta, r[1].id, r[0].addend)) + return infos - def disassemble(self, segment, bin_img): - # make sure its a code segment - if segment.seg_type != SEGMENT_TYPE_CODE: - return None + def disassemble(self, segment, bin_img): + # make sure its a code segment + if segment.seg_type != SEGMENT_TYPE_CODE: + return None - # generate raw assembly - data = segment.data - lines = self.disasm.disassemble_block(data) + # generate raw assembly + data = segment.data + lines = self.disasm.disassemble_block(data) - # process lines - result = [] - for l in lines: - addr = l[0] - word = l[1] - code = l[2] + # process lines + result = [] + for l in lines: + addr = l[0] + word = l[1] + code = l[2] - # try to find a symbol for this addr - symbol = segment.find_symbol(addr) - if symbol is not None: - line = "\t\t\t\t%s:" % symbol - result.append(line) + # try to find a symbol for this addr + symbol = 
segment.find_symbol(addr) + if symbol is not None: + line = "\t\t\t\t%s:" % symbol + result.append(line) - # create final line - line = "%08x\t%-20s\t%-30s " % (addr," ".join(["%04x" %x for x in word]),code) + # create final line + line = "%08x\t%-20s\t%-30s " % ( + addr, + " ".join(["%04x" % x for x in word]), + code, + ) - # create line info - size = len(word) * 2 - info = self._get_line_info(segment, addr, size) - if info is None or len(info) == 0: - result.append(line) - else: - result.append(line + "; " + info[0]) - spc = " " * len(line) - for i in info[1:]: - result.append(spc + "; " + i) + # create line info + size = len(word) * 2 + info = self._get_line_info(segment, addr, size) + if info is None or len(info) == 0: + result.append(line) + else: + result.append(line + "; " + info[0]) + spc = " " * len(line) + for i in info[1:]: + result.append(spc + "; " + i) + + return result - return result # mini test -if __name__ == '__main__': - import sys - from .BinFmt import BinFmt - bf = BinFmt() - for a in sys.argv[1:]: - bi = bf.load_image(a) - if bi is not None: - print(a) - d = Disassemble() - for seg in bi.get_segments(): - if seg.seg_type == SEGMENT_TYPE_CODE: - lines = d.disassemble(seg, bi) - for l in lines: - print(l) +if __name__ == "__main__": + import sys + from .BinFmt import BinFmt + + bf = BinFmt() + for a in sys.argv[1:]: + bi = bf.load_image(a) + if bi is not None: + print(a) + d = Disassemble() + for seg in bi.get_segments(): + if seg.seg_type == SEGMENT_TYPE_CODE: + lines = d.disassemble(seg, bi) + for l in lines: + print(l) diff --git a/amitools/binfmt/Dumper.py b/amitools/binfmt/Dumper.py index 6d9573f1..7e621096 100644 --- a/amitools/binfmt/Dumper.py +++ b/amitools/binfmt/Dumper.py @@ -1,59 +1,68 @@ - from .BinImage import * import amitools.util.HexDump as HexDump + class Dumper: - def __init__(self, bin_img): - self.bin_img = bin_img + def __init__(self, bin_img): + self.bin_img = bin_img - def dump(self, hex_dump=False, show_reloc=False, 
show_symbols=False, show_debug_line=False): - for seg in self.bin_img.get_segments(): - seg_type = seg.seg_type - seg_type_name = segment_type_names[seg_type] - size = seg.size - print("#%02d %04s %08x/%10d" % (seg.id, seg_type_name, size, size)) - # show hex dump? - data = seg.data - if data is not None and hex_dump: - HexDump.print_hex(data, 4) - # show reloc - if show_reloc: - to_segs = seg.get_reloc_to_segs() - for to_seg in to_segs: - print(" RELOC to #%02d" % (to_seg.id)) - reloc = seg.get_reloc(to_seg) - for r in reloc.get_relocs(): - off = r.get_offset() - add = r.get_addend() - print(" %08x/%10d +%08x/%10d" % (off, off, add, add)) - # show symbols - if show_symbols: - symtab = seg.get_symtab() - if symtab is not None: - print(" SYMBOLS") - for sym in symtab.get_symbols(): - off = sym.get_offset() - name = sym.get_name() - print(" %08x/%10d %s" % (off, off, name)) - # show debug info - if show_debug_line: - debug_line = seg.get_debug_line() - if debug_line is not None: - print(" DEBUG LINE") - for f in debug_line.get_files(): - print(" FILE: [%s] %s" % (f.get_dir_name(), f.get_src_file())) - for e in f.get_entries(): - print(" %08x %d" % (e.get_offset(), e.get_src_line())) + def dump( + self, + hex_dump=False, + show_reloc=False, + show_symbols=False, + show_debug_line=False, + ): + for seg in self.bin_img.get_segments(): + seg_type = seg.seg_type + seg_type_name = segment_type_names[seg_type] + size = seg.size + print("#%02d %04s %08x/%10d" % (seg.id, seg_type_name, size, size)) + # show hex dump? 
+ data = seg.data + if data is not None and hex_dump: + HexDump.print_hex(data, 4) + # show reloc + if show_reloc: + to_segs = seg.get_reloc_to_segs() + for to_seg in to_segs: + print(" RELOC to #%02d" % (to_seg.id)) + reloc = seg.get_reloc(to_seg) + for r in reloc.get_relocs(): + off = r.get_offset() + add = r.get_addend() + print(" %08x/%10d +%08x/%10d" % (off, off, add, add)) + # show symbols + if show_symbols: + symtab = seg.get_symtab() + if symtab is not None: + print(" SYMBOLS") + for sym in symtab.get_symbols(): + off = sym.get_offset() + name = sym.get_name() + print(" %08x/%10d %s" % (off, off, name)) + # show debug info + if show_debug_line: + debug_line = seg.get_debug_line() + if debug_line is not None: + print(" DEBUG LINE") + for f in debug_line.get_files(): + print( + " FILE: [%s] %s" % (f.get_dir_name(), f.get_src_file()) + ) + for e in f.get_entries(): + print(" %08x %d" % (e.get_offset(), e.get_src_line())) # mini test -if __name__ == '__main__': - import sys - from .BinFmt import BinFmt - bf = BinFmt() - for a in sys.argv[1:]: - bi = bf.load_image(a) - if bi is not None: - print(a) - d = Dumper(bi) - d.dump(True, True, True, True) +if __name__ == "__main__": + import sys + from .BinFmt import BinFmt + + bf = BinFmt() + for a in sys.argv[1:]: + bi = bf.load_image(a) + if bi is not None: + print(a) + d = Dumper(bi) + d.dump(True, True, True, True) diff --git a/amitools/binfmt/Relocate.py b/amitools/binfmt/Relocate.py index 389a9ea8..9567b273 100644 --- a/amitools/binfmt/Relocate.py +++ b/amitools/binfmt/Relocate.py @@ -1,116 +1,120 @@ - import struct + class Relocate: - """Relocate a BinImage to given addresses""" - def __init__(self, bin_img, verbose=False): - self.bin_img = bin_img - self.verbose = verbose - - def get_sizes(self): - """return a list of the required sizes for all sections""" - sizes = [] - for segment in self.bin_img.get_segments(): - size = segment.size - sizes.append(size) - return sizes - - def get_total_size(self, padding=0): 
- """return the total size of all segments appended. useful for one large blob""" - sizes = self.get_sizes() - total = 0 - for s in sizes: - total += s + padding - return total - - def get_seq_addrs(self, base_addr, padding=0): - """generate a sequence of addresses for continous segments in one blob""" - sizes = self.get_sizes() - addrs = [] - addr = base_addr - for s in sizes: - addrs.append(addr) - addr += s + padding - return addrs - - def relocate_one_block(self, base_addr, padding=0): - total_size = self.get_total_size(padding) - data = bytearray(total_size) - addrs = self.get_seq_addrs(base_addr, padding) - offset = 0 - segs = self.bin_img.get_segments() - for segment in segs: - self._copy_data(data, segment, offset) - self._reloc_data(data, segment, addrs, offset) - offset += segment.size + padding - return data - - def relocate(self, addrs, in_data=None): - """perform relocations on segments and return relocated data""" - segs = self.bin_img.get_segments() - if len(segs) != len(addrs): - raise ValueError("addrs != segments") - datas = [] - for segment in segs: - # allocate new buffer - data = bytearray(segment.size) - self._copy_data(data, segment) - self._reloc_data(data, segment, addrs) - datas.append(data) - return datas - - def _copy_data(self, data, segment, offset=0): - # allocate segment data - size = segment.size - src_data = segment.data - if src_data is not None: - src_len = len(src_data) - data[offset:src_len+offset] = src_data - - if self.verbose: - print("#%02d @%06x +%06x" % (segment.id, addrs[segment.id], size)) - - def _reloc_data(self, data, segment, addrs, offset=0): - # find relocations - to_segs = segment.get_reloc_to_segs() - for to_seg in to_segs: - # get target segment's address - to_id = to_seg.id - to_addr = addrs[to_id] - # get relocations - reloc = segment.get_reloc(to_seg) - for r in reloc.get_relocs(): - self._reloc(segment.id, data, r, to_addr, to_id, offset) - - def _reloc(self, my_id, data, reloc, to_addr, to_id, 
extra_offset): - """relocate one entry""" - offset = reloc.get_offset() + extra_offset - delta = self._read_long(data, offset) + reloc.addend - addr = to_addr + delta - self._write_long(data, offset, addr) - if self.verbose: - print("#%02d + %06x: %06x (delta) + @%06x (#%02d) -> %06x" % - (my_id, offset, delta, to_addr, to_id, addr)) - - def _read_long(self, data, offset): - d = data[offset:offset+4] - return struct.unpack(">i",d)[0] - - def _write_long(self, data, offset, value): - d = struct.pack(">i",value) - data[offset:offset+4] = d + """Relocate a BinImage to given addresses""" + + def __init__(self, bin_img, verbose=False): + self.bin_img = bin_img + self.verbose = verbose + + def get_sizes(self): + """return a list of the required sizes for all sections""" + sizes = [] + for segment in self.bin_img.get_segments(): + size = segment.size + sizes.append(size) + return sizes + + def get_total_size(self, padding=0): + """return the total size of all segments appended. useful for one large blob""" + sizes = self.get_sizes() + total = 0 + for s in sizes: + total += s + padding + return total + + def get_seq_addrs(self, base_addr, padding=0): + """generate a sequence of addresses for continous segments in one blob""" + sizes = self.get_sizes() + addrs = [] + addr = base_addr + for s in sizes: + addrs.append(addr) + addr += s + padding + return addrs + + def relocate_one_block(self, base_addr, padding=0): + total_size = self.get_total_size(padding) + data = bytearray(total_size) + addrs = self.get_seq_addrs(base_addr, padding) + offset = 0 + segs = self.bin_img.get_segments() + for segment in segs: + self._copy_data(data, segment, offset) + self._reloc_data(data, segment, addrs, offset) + offset += segment.size + padding + return data + + def relocate(self, addrs, in_data=None): + """perform relocations on segments and return relocated data""" + segs = self.bin_img.get_segments() + if len(segs) != len(addrs): + raise ValueError("addrs != segments") + datas = [] + 
for segment in segs: + # allocate new buffer + data = bytearray(segment.size) + self._copy_data(data, segment) + self._reloc_data(data, segment, addrs) + datas.append(data) + return datas + + def _copy_data(self, data, segment, offset=0): + # allocate segment data + size = segment.size + src_data = segment.data + if src_data is not None: + src_len = len(src_data) + data[offset : src_len + offset] = src_data + + if self.verbose: + print("#%02d @%06x +%06x" % (segment.id, addrs[segment.id], size)) + + def _reloc_data(self, data, segment, addrs, offset=0): + # find relocations + to_segs = segment.get_reloc_to_segs() + for to_seg in to_segs: + # get target segment's address + to_id = to_seg.id + to_addr = addrs[to_id] + # get relocations + reloc = segment.get_reloc(to_seg) + for r in reloc.get_relocs(): + self._reloc(segment.id, data, r, to_addr, to_id, offset) + + def _reloc(self, my_id, data, reloc, to_addr, to_id, extra_offset): + """relocate one entry""" + offset = reloc.get_offset() + extra_offset + delta = self._read_long(data, offset) + reloc.addend + addr = to_addr + delta + self._write_long(data, offset, addr) + if self.verbose: + print( + "#%02d + %06x: %06x (delta) + @%06x (#%02d) -> %06x" + % (my_id, offset, delta, to_addr, to_id, addr) + ) + + def _read_long(self, data, offset): + d = data[offset : offset + 4] + return struct.unpack(">i", d)[0] + + def _write_long(self, data, offset, value): + d = struct.pack(">i", value) + data[offset : offset + 4] = d # mini test -if __name__ == '__main__': - import sys - from .BinFmt import BinFmt - bf = BinFmt() - for a in sys.argv[1:]: - bi = bf.load_image(a) - if bi is not None: - print(a) - r = Relocate(bi, True) - addrs = r.get_seq_addrs(0) - datas = r.relocate(addrs) - data = r.relocate_one_block(0) +if __name__ == "__main__": + import sys + from .BinFmt import BinFmt + + bf = BinFmt() + for a in sys.argv[1:]: + bi = bf.load_image(a) + if bi is not None: + print(a) + r = Relocate(bi, True) + addrs = 
r.get_seq_addrs(0) + datas = r.relocate(addrs) + data = r.relocate_one_block(0) diff --git a/amitools/binfmt/elf/BinFmtELF.py b/amitools/binfmt/elf/BinFmtELF.py index 2efb59a2..b425096b 100644 --- a/amitools/binfmt/elf/BinFmtELF.py +++ b/amitools/binfmt/elf/BinFmtELF.py @@ -1,5 +1,3 @@ - - from amitools.binfmt.BinImage import * from .ELFFile import * from .ELF import * @@ -8,176 +6,176 @@ class BinFmtELF: - """Handle Amiga m68k binaries in ELF format (usually from AROS)""" - - def is_image(self, path): - """check if a given file is a supported ELF file""" - with open(path, "rb") as f: - return self.is_image_fobj(f) - - def is_image_fobj(self, fobj): - """check if a given fobj is a supported ELF file""" - try: - pos = fobj.tell() - - # read identifier - ident = ELFIdentifier() - ident_data = fobj.read(16) - ident.parse(ident_data) - - # read header - hdr = ELFHeader() - hdr_data = fobj.read(36) - hdr.parse(hdr_data) - - # seek back - fobj.seek(pos,0) - - # check header - return self.is_supported_elf(ident, hdr) - except ELFParseError: - return False - - def is_supported_elf(self, ident, hdr): - """check ELF header if its a m68k binary""" - if hdr.machine != EM_68K: - return False - if ident.osabi not in (ELFOSABI_SYSV, ELFOSABI_AROS): - return False - return True - - def load_image(self, path): - """load a BinImage from an ELF file given via path""" - with open(path, "rb") as f: - return self.load_image_fobj(f) - - def load_image_fobj(self, fobj): - """load a BinImage from an ELF file given via file object""" - # read elf file - reader = ELFReader() - elf = reader.load(fobj) - # create bin image and assign elf file - bi = BinImage(BIN_IMAGE_TYPE_ELF) - bi.set_file_data(elf) - # walk through elf sections - sect_to_seg = {} - for sect in elf.sections: - # determine segment type - seg_type = None - name = sect.name_str - flags = 0 - if name == b'.text': - seg_type = SEGMENT_TYPE_CODE - elif name == b'.data': - seg_type = SEGMENT_TYPE_DATA - elif name == b'.rodata': - 
class BinFmtELF:
    """Handle Amiga m68k binaries in ELF format (usually from AROS)."""

    def is_image(self, path):
        """check if a given file is a supported ELF file"""
        with open(path, "rb") as f:
            return self.is_image_fobj(f)

    def is_image_fobj(self, fobj):
        """check if a given fobj is a supported ELF file"""
        try:
            pos = fobj.tell()
            # read and parse the 16 byte identifier
            ident = ELFIdentifier()
            ident.parse(fobj.read(16))
            # read and parse the fixed-size header that follows
            hdr = ELFHeader()
            hdr.parse(fobj.read(36))
            # rewind so a later load starts at the beginning again
            fobj.seek(pos, 0)
            return self.is_supported_elf(ident, hdr)
        except ELFParseError:
            return False

    def is_supported_elf(self, ident, hdr):
        """check ELF header if its a m68k binary"""
        # accept only m68k machine code with a SysV or AROS OS ABI
        if hdr.machine != EM_68K:
            return False
        if ident.osabi not in (ELFOSABI_SYSV, ELFOSABI_AROS):
            return False
        return True

    def load_image(self, path):
        """load a BinImage from an ELF file given via path"""
        with open(path, "rb") as f:
            return self.load_image_fobj(f)

    def load_image_fobj(self, fobj):
        """load a BinImage from an ELF file given via file object"""
        # read the whole ELF file and wrap it in a BinImage
        elf = ELFReader().load(fobj)
        bi = BinImage(BIN_IMAGE_TYPE_ELF)
        bi.set_file_data(elf)
        # first pass: map well-known section names to segments
        sect_to_seg = {}
        for sect in elf.sections:
            seg_type = None
            flags = 0
            name = sect.name_str
            if name == b".text":
                seg_type = SEGMENT_TYPE_CODE
            elif name == b".data":
                seg_type = SEGMENT_TYPE_DATA
            elif name == b".rodata":
                seg_type = SEGMENT_TYPE_DATA
                flags = SEGMENT_FLAG_READ_ONLY
            elif name == b".bss":
                seg_type = SEGMENT_TYPE_BSS
            if seg_type is not None:
                seg = Segment(seg_type, sect.header.size, sect.data, flags)
                bi.add_segment(seg)
                # remember section <-> segment association
                seg.set_file_data(sect)
                sect_to_seg[sect] = seg
        # second pass: attach relocations and symbols per segment
        for seg in bi.get_segments():
            sect = seg.get_file_data()
            if len(sect.get_rela()) > 0:
                self.add_elf_rela(sect, seg, sect_to_seg)
            symbols = sect.get_symbols()
            if len(symbols) > 0:
                self.add_elf_symbols(symbols, seg)
        # optionally decode DWARF line debug info
        ddl = DwarfDebugLine()
        if ddl.decode(elf):
            self.add_debug_line(ddl, bi, sect_to_seg)
        return bi

    def add_elf_rela(self, sect, seg, sect_to_seg):
        """convert the ELF relocations of a section into segment relocations"""
        for tgt_sect in sect.get_rela_sections():
            # only keep relocations that target a section we turned into a segment
            if tgt_sect in sect_to_seg:
                to_seg = sect_to_seg[tgt_sect]
                rl = Relocations(to_seg)
                seg.add_reloc(to_seg, rl)
                for rel in sect.get_rela_by_section(tgt_sect):
                    rl.add_reloc(Reloc(rel.offset, addend=rel.section_addend))

    def add_elf_symbols(self, symbols, seg):
        """attach a symbol table with the section's symbols to the segment"""
        symtab = SymbolTable()
        seg.set_symtab(symtab)
        for sym in symbols:
            # a symbol may carry an associated FILE symbol naming its source
            if sym.file_sym is not None:
                file_name = sym.file_sym.name_str
            else:
                file_name = None
            symtab.add_symbol(Symbol(sym.value, sym.name_str, file_name))

    def add_debug_line(self, ddl, bi, sect_to_seg):
        """convert decoded DWARF line rows into per-segment DebugLine data"""
        seg_to_dl = {}
        for row in ddl.get_matrix():
            sect = row.section
            if sect not in sect_to_seg:
                continue
            segment = sect_to_seg[sect]
            # get (or lazily create) the DebugLine container of this segment
            if segment in seg_to_dl:
                dl, file_to_df = seg_to_dl[segment]
            else:
                dl = DebugLine()
                file_to_df = {}
                segment.set_debug_line(dl)
                seg_to_dl[segment] = (dl, file_to_df)
            # get (or lazily create) the file entry referenced by this row
            fid = row.file
            if fid in file_to_df:
                df = file_to_df[fid]
            else:
                df = DebugLineFile(ddl.get_file_name(fid), ddl.get_file_dir(fid))
                dl.add_file(df)
                file_to_df[fid] = df
            df.add_entry(DebugLineEntry(row.address, row.line))


# mini test
if __name__ == "__main__":
    import sys

    bf = BinFmtELF()
    for a in sys.argv[1:]:
        if bf.is_image(a):
            print("loading", a)
            print(bf.load_image(a))
        else:
            print("NO ELF:", a)
class LineState:
    """one row of the DWARF line state machine / line matrix."""

    def __init__(self, is_stmt=False):
        self.address = 0
        self.file = 1
        self.line = 1
        self.column = 0
        self.is_stmt = is_stmt
        self.basic_block = False
        self.end_sequence = False
        self.section = None

    def clone(self):
        """return an independent copy of the current state row"""
        # all attributes are plain immutable values, so a shallow
        # attribute copy is equivalent to copying them one by one
        other = LineState()
        other.__dict__.update(self.__dict__)
        return other

    def __str__(self):
        return (
            "[address=%08x file=%d line=%d column=%d is_stmt=%s basic_block=%s end_sequence=%d]"
            % (
                self.address,
                self.file,
                self.line,
                self.column,
                self.is_stmt,
                self.basic_block,
                self.end_sequence,
            )
        )
class DwarfDebugLine:
    """decode .debug_line DWARF line debug sections (DWARF 2 only)."""

    def __init__(self, verbose=False):
        self.input = None  # BytesIO over the raw .debug_line data
        self.error = None  # human readable reason why decode() failed
        self.verbose = verbose
        self.matrix = None  # list of LineState rows after a successful decode()

    def _log(self, *args):
        if self.verbose:
            print(*args)

    def decode(self, elf_file):
        """decode the .debug_line section of the given ELF file.

        Returns True on success (line matrix available via get_matrix())
        or False with a message stored in self.error.
        """
        # get section with debug info
        debug_line = elf_file.get_section_by_name(".debug_line")
        if debug_line is None:
            self.error = "No .debug_line section found! No debug info?"
            return False
        # get (optional) relocations
        rela = elf_file.get_section_by_name(".rela.debug_line")
        # start parsing.
        # BUGFIX: the section data is binary, so it must be wrapped in a
        # BytesIO - a StringIO would break struct.unpack()/ord() on Python 3.
        self.input = io.BytesIO(debug_line.data)
        # decode header
        if not self.decode_header():
            return False
        if self.verbose:
            self.dump_header()
        # decode line program: a state machine producing the line matrix
        matrix = []
        state = LineState(self.default_is_stmt)
        log = self._log
        while True:
            # read opcode
            opc_ch = self.input.read(1)
            if len(opc_ch) == 0:
                break
            opc = ord(opc_ch)
            log("opcode=", opc)
            # 0 = extended opcode (length-prefixed sub opcode)
            if opc == 0:
                opc_size = self.read_leb128()
                sub_opc = ord(self.input.read(1))
                log(" sub_opcode=", sub_opc)
                # 1: DW_LNE_end_sequence: emit row and reset state
                if sub_opc == 1:
                    state.end_sequence = True
                    line = state.clone()
                    matrix.append(line)
                    state.__init__()
                    log("DW_LNE_end_sequence:", line)
                # 2: DW_LNE_set_address: absolute address (maybe relocated)
                elif sub_opc == 2:
                    pos = self.input.tell()
                    addr = self.read_long()
                    addend, sect = self.find_rela(rela, pos)
                    state.address = addr + addend
                    state.section = sect
                    log("DW_LNE_set_address: %08x sect=%s" % (state.address, sect))
                # 3: DW_LNE_set_file: define an additional file entry
                elif sub_opc == 3:
                    tup = self.decode_file()
                    self.files.append(tup)
                    log("DW_LNE_set_file", tup)
                # other (unknown) ext opc: skip its payload
                else:
                    log("unknown sub opcode!")
                    self.input.seek(opc_size - 1, 1)
            # standard opcodes
            elif opc < self.opc_base:
                # 1: DW_LNS_copy: emit a row
                if opc == 1:
                    line = state.clone()
                    matrix.append(line)
                    log("DW_LNS_copy:", line)
                    state.basic_block = False
                # 2: DW_LNS_advance_pc
                elif opc == 2:
                    offset = self.read_leb128() * self.min_instr_len
                    state.address += offset
                    log("DW_LNS_advance_pc: +%d -> %08x" % (offset, state.address))
                # 3: DW_LNS_advance_line
                elif opc == 3:
                    offset = self.read_sleb128()
                    state.line += offset
                    log("DW_LNS_advance_line: +%d -> %d" % (offset, state.line))
                # 4: DW_LNS_set_file
                elif opc == 4:
                    state.file = self.read_leb128()
                    log("DW_LNS_set_file", state.file)
                # 5: DW_LNS_set_column
                elif opc == 5:
                    state.column = self.read_leb128()
                    log("DW_LNS_set_column", state.column)
                # 6: DW_LNS_negate_stmt
                elif opc == 6:
                    state.is_stmt = not state.is_stmt
                    log("DW_LNS_negate_stmt", state.is_stmt)
                # 7: DW_LNS_set_basic_block
                elif opc == 7:
                    state.basic_block = True
                    log("DW_LNS_set_basic_block")
                # 8: DW_LNS_const_add_pc: advance address as opcode 255 would
                elif opc == 8:
                    (addr_addend, _) = self.decode_special_opcode(255)
                    state.address += addr_addend
                    log(
                        "DW_LNS_const_add_pc: +%d -> %08x"
                        % (addr_addend, state.address)
                    )
                # 9: DW_LNS_fixed_advance_pc: plain 16 bit advance
                elif opc == 9:
                    offset = self.read_word()
                    state.address += offset
                    log("DW_LNS_fixed_advance_pc: %+08x" % offset)
                # other (unknown) opc: skip its declared LEB128 arguments
                else:
                    num_args = self.std_opc_lens[opc]
                    log("skip unknown: num_args=", num_args)
                    for i in range(num_args):
                        self.read_leb128()
            # special opcodes advance address and line in a single step
            else:
                (addr_addend, line_addend) = self.decode_special_opcode(opc)
                state.address += addr_addend
                state.line += line_addend
                state.basic_block = False
                line = state.clone()
                matrix.append(line)
                log("special", (opc - self.opc_base), line)
        # done
        self.matrix = matrix
        return True

    def get_matrix(self):
        """return list of decoded line rows (LineState clones)"""
        return self.matrix

    def get_file_dir(self, idx):
        """return the include directory of file number idx (1-based)"""
        f = self.files[idx - 1]
        dir_idx = f[1]
        if dir_idx > 0:
            dir_name = self.inc_dirs[dir_idx - 1]
        else:
            dir_name = ""
        return dir_name

    def get_file_name(self, idx):
        """return the name of file number idx (1-based)"""
        return self.files[idx - 1][0]

    def find_rela(self, rela_section, pos):
        """look up a relocation at offset pos; return (addend, section)"""
        if rela_section is not None:
            for rela in rela_section.rela:
                if rela.offset == pos:
                    return rela.addend, rela.section
        return 0, None

    def decode_special_opcode(self, opc):
        """return the (address_addend, line_addend) pair a special opcode encodes"""
        adj_opc = opc - self.opc_base
        addr_addend = (adj_opc // self.line_range) * self.min_instr_len
        line_addend = self.line_base + (adj_opc % self.line_range)
        return (addr_addend, line_addend)

    def decode_header(self):
        """parse the .debug_line unit header; returns False on error"""
        self.unit_length = self.read_long()
        self.version = self.read_word()
        if self.version != 2:
            self.error = "Can only decode DWARF 2 debug info"
            return False
        self.header_length = self.read_long()
        self.min_instr_len = self.read_byte()
        self.default_is_stmt = self.read_byte()
        self.line_base = self.read_sbyte()
        self.line_range = self.read_byte()
        self.opc_base = self.read_byte()
        # standard opcode argument counts (opc_base - 1 entries)
        self.std_opc_lens = []
        n = self.opc_base
        if n > 0:
            for i in range(n - 1):
                self.std_opc_lens.append(self.read_byte())
        # include directories, terminated by an empty string
        self.inc_dirs = []
        while True:
            inc_dir = self.read_string()
            if inc_dir == "":
                break
            self.inc_dirs.append(inc_dir)
        # file entries, terminated by an empty name
        self.files = []
        while True:
            tup = self.decode_file()
            if tup is None:
                break
            self.files.append(tup)
        # end header: header_length counts the bytes following its own field
        pos = self.input.tell()
        hdr_len = pos - 10
        if hdr_len != self.header_length:
            self.error = "Error size mismatch: %d != %d" % (hdr_len, self.header_length)
            return False
        return True

    def decode_file(self):
        """read one file entry; returns None at the terminating empty name"""
        file_name = self.read_string()
        if file_name == "":
            return None
        dir_idx = self.read_leb128()
        last_mod = self.read_leb128()
        file_size = self.read_leb128()
        return (file_name, dir_idx, last_mod, file_size)

    def dump_header(self):
        """print the decoded header fields (verbose mode helper)"""
        print(
            "unit_length=%x version=%d header_length=%x max_instr_len=%d "
            "default_is_stmt=%d line_base=%d line_range=%d opc_base=%d"
            % (
                self.unit_length,
                self.version,
                self.header_length,
                self.min_instr_len,
                self.default_is_stmt,
                self.line_base,
                self.line_range,
                self.opc_base,
            )
        )
        print("std_opc_lens:", ",".join(map(str, self.std_opc_lens)))
        print("inc_dirs")
        for d in self.inc_dirs:
            print(d)
        print("files")
        for f in self.files:
            print(f)

    def read_string(self):
        """read a zero terminated string and return it as str"""
        # BUGFIX: the input is a binary stream, so collect bytes and decode
        # once at the end instead of joining str fragments.
        result = []
        while True:
            ch = self.input.read(1)
            if ord(ch) == 0:
                break
            result.append(ch)
        return b"".join(result).decode("latin-1")

    def read_leb128(self):
        """read an unsigned little endian base 128 value"""
        result = 0
        shift = 0
        while True:
            byte = self.read_byte()
            result |= (byte & 0x7F) << shift
            if byte & 0x80 == 0:
                break
            shift += 7
        return result

    def read_sleb128(self):
        """read a signed little endian base 128 value"""
        result = 0
        shift = 0
        while True:
            byte = self.read_byte()
            result |= (byte & 0x7F) << shift
            shift += 7
            if byte & 0x80 == 0:
                break
        # sign extend if the sign bit of the last group is set
        if byte & 0x40 == 0x40:
            mask = 1 << shift
            result |= -mask
        return result

    def read_long(self):
        data = self.input.read(4)
        return struct.unpack(">I", data)[0]

    def read_word(self):
        data = self.input.read(2)
        return struct.unpack(">H", data)[0]

    def read_byte(self):
        data = self.input.read(1)
        return struct.unpack(">B", data)[0]

    def read_sbyte(self):
        data = self.input.read(1)
        return struct.unpack(">b", data)[0]


# mini test
if __name__ == "__main__":
    import sys
    from .ELFReader import ELFReader

    reader = ELFReader()
    for a in sys.argv[1:]:
        f = open(a, "rb")
        ef = reader.load(f)
        ddl = DwarfDebugLine(verbose=True)
        ok = ddl.decode(ef)
        if ok:
            print("--- line matrix ---")
            for row in ddl.get_matrix():
                name = ddl.get_file_name(row.file)
                fdir = ddl.get_file_dir(row.file)
                sect_name = row.section.name_str
                print(
                    "%08x: %s [%s] %s:%d"
                    % (row.address, sect_name, fdir, name, row.line)
                )
# ELF file type (e_type) names
ET_values = {0: "NONE", 1: "REL", 2: "EXEC", 3: "DYN", 4: "CORE"}

SHN_UNDEF = 0
SHT_SYMTAB = 2
SHT_NOBITS = 8

# section header type (sh_type) names
SHT_values = {
    0: "NULL",
    1: "PROGBITS",
    2: "SYMTAB",
    3: "STRTAB",
    4: "RELA",
    5: "HASH",
    6: "DYNAMIC",
    7: "NOTE",
    8: "NOBITS",
    9: "REL",
    10: "SHLIB",
    11: "DYNSYM",
    14: "INIT_ARRAY",
    15: "FINI_ARRAY",
    16: "PREINIT_ARRAY",
    17: "GROUP",
    18: "SYMTAB_SHNDX",
}

# section header flag (sh_flags) bit names
SHT_flags = {
    1: "WRITE",
    2: "ALLOC",
    4: "EXECINSTR",
    8: "MERGE",
    16: "STRINGS",
    32: "INFO_LINK",
    64: "LINK_ORDER",
    128: "OS_NONCONFORMING",
    256: "GROUP",
    512: "TLS",
}

# special section index (st_shndx) names
SHN_values = {0: "UND", 0xFFF1: "ABS"}

# symbol binding (st_info upper nibble) names
STB_values = {0: "LOCAL", 1: "GLOBAL", 2: "WEAK", 3: "NUM"}

# symbol type (st_info lower nibble) names
STT_values = {
    0: "NOTYPE",
    1: "OBJECT",
    2: "FUNC",
    3: "SECTION",
    4: "FILE",
    5: "COMMON",
    6: "TLS",
    7: "NUM",
}

# symbol visibility (st_other) names
STV_values = {0: "DEFAULT", 1: "INTERNAL", 2: "HIDDEN", 3: "PROTECTED"}

# m68k relocation type names
R_68K_values = {0: "68K_NONE", 1: "68K_32"}
class ELFDumper:
    """pretty-print the contents of a loaded ELF file to stdout."""

    def __init__(self, elf_file):
        self.elf = elf_file

    def _dump_rela_entry(self, rel, prefix="\t\t\t"):
        """print a single relocation entry line"""
        tgt = rel.section
        sect_txt = "%s (%d) + %d" % (tgt.name_str, tgt.idx, rel.section_addend)
        if rel.symbol is not None:
            sym_txt = "%s (%d) + %d" % (rel.symbol.name_str, rel.symbol.idx, rel.addend)
        else:
            sym_txt = ""
        print(
            "%s%08x %-10s %-20s %s"
            % (prefix, rel.offset, rel.type_str, sect_txt, sym_txt)
        )

    def _dump_symbol(self, sym):
        """print a single symbol entry line"""
        print(
            "\t\t\t%08x %6d %-8s %-8s %-16s"
            % (sym.value, sym.size, sym.type_str, sym.bind_str, sym.name_str)
        )

    def dump_sections(self, show_relocs=False, show_debug=False):
        """list all sections, optionally with relocations and symbols"""
        print("ELF Sections")
        print("id name size rela syms type flags")
        for sect in self.elf.sections:
            rela = sect.get_rela()
            symbols = sect.get_symbols()
            print(
                "%2d %-16s %08x %4d %4d %-10s %s"
                % (
                    sect.idx,
                    sect.name_str,
                    sect.header.size,
                    len(rela),
                    len(symbols),
                    sect.header.type_str,
                    ",".join(sect.header.flags_dec),
                )
            )
            # relocations: flat list first, then grouped by target section
            if show_relocs and len(rela) > 0:
                print("\t\tRelocations:")
                for rel in rela:
                    self._dump_rela_entry(rel)
                for tgt_sect in sect.get_rela_sections():
                    print("\t\tTo Section #%d:" % tgt_sect.idx)
                    for rel in sect.get_rela_by_section(tgt_sect):
                        self._dump_rela_entry(rel)
            # symbols attached to this section
            if show_debug and len(symbols) > 0:
                print("\t\tSymbols:")
                for sym in symbols:
                    self._dump_symbol(sym)

    def dump_symbols(self):
        """list all symbol tables of the file"""
        print("ELF Symbol Table")
        symtabs = self.elf.symtabs
        if len(symtabs) == 0:
            print("no symbols")
            return
        print("idx value size type bind visible ndx name")
        for symtab in symtabs:
            for sym in symtab.get_table_symbols():
                # prefer the symbolic section index name, else the section name
                ndx_txt = sym.shndx_str
                if ndx_txt is None:
                    ndx_txt = sym.section.name_str
                print(
                    "%4d %08x %6d %-8s %-8s %-8s %-16s %s"
                    % (
                        sym.idx,
                        sym.value,
                        sym.size,
                        sym.type_str,
                        sym.bind_str,
                        sym.visibility_str,
                        ndx_txt,
                        sym.name_str,
                    )
                )

    def dump_relas(self):
        """list all relocation sections of the file"""
        print("ELF Relocations")
        rela_sects = self.elf.relas
        if len(rela_sects) == 0:
            print("no relocations")
            return
        for rela_sect in rela_sects:
            print(rela_sect.name_str, "linked to", rela_sect.reloc_section.name_str)
            print(" offset type segment + addend symbol + addend")
            for num, rela in enumerate(rela_sect.get_relocations()):
                self._dump_rela_entry(rela, prefix="%4d " % num)

    def dump_relas_by_sect(self):
        """list relocations grouped by source and target section"""
        print("ELF Relocations (by sections)")
        for sect in self.elf.sections:
            to_sects = sect.get_rela_sections()
            if len(to_sects) > 0:
                print(" section", sect.idx)
                for to_sect in to_sects:
                    print(" -> section", to_sect.idx)
                    for num, rela in enumerate(sect.get_rela_by_section(to_sect)):
                        self._dump_rela_entry(rela, prefix=" %4d " % num)


if __name__ == "__main__":
    from .ELFReader import ELFReader
    import sys

    reader = ELFReader()
    for a in sys.argv[1:]:
        f = open(a, "rb")
        ef = reader.load(f)
        dumper = ELFDumper(ef)
        dumper.dump_sections(True, True)
        dumper.dump_symbols()
        dumper.dump_relas()
        dumper.dump_relas_by_sect()
class ELFParseError(Exception):
    """raised when the ELF file structure is invalid"""

    def __init__(self, msg):
        self.msg = msg


class ELFPart:
    """base class for parts of ELF file"""

    # subclasses list the attribute names of their binary fields here
    _names = []

    def __init__(self):
        for name in self._names:
            setattr(self, name, None)

    def _parse_data(self, fmt, data):
        """unpack big-endian fields per fmt and store them under _names"""
        flen = len(fmt)
        nlen = len(self._names)
        if flen != nlen:
            raise ValueError("_parse_data size mismatch")
        decoded = struct.unpack(">" + fmt, data)
        if len(decoded) != nlen:
            raise ELFParseError("data decode error")
        for i in range(nlen):
            setattr(self, self._names[i], decoded[i])

    def _decode_flags(self, value, names):
        """return the list of flag names whose bit mask is set in value"""
        result = []
        for mask in names:
            if mask & value == mask:
                result.append(names[mask])
        return result

    def _decode_value(self, value, names):
        """map a value to its name or None if unknown"""
        if value in names:
            return names[value]
        else:
            return None


class ELFIdentifier(ELFPart):
    """the 16 byte e_ident prefix of an ELF file"""

    _names = ["class_", "data", "version", "osabi", "abiversion"]

    def __init__(self):
        ELFPart.__init__(self)

    def parse(self, ident_data):
        """parse the identifier bytes; raises ELFParseError on bad magic"""
        magic = ident_data[0:4]
        if magic != b"\177ELF":
            raise ELFParseError("No ELF Magic found!")
        self.class_ = ident_data[4]
        self.data = ident_data[5]
        self.version = ident_data[6]
        self.osabi = ident_data[7]
        self.abiversion = ident_data[8]


class ELFHeader(ELFPart):
    """the ELF file header following the identifier"""

    _names = [
        "type_",
        "machine",
        "version",
        "entry",
        "phoff",
        "shoff",
        "flags",
        "ehsize",
        "phentsize",
        "phnum",
        "shentsize",
        "shnum",
        "shstrndx",
    ]

    def __init__(self):
        ELFPart.__init__(self)

    def parse(self, data):
        fmt = "HHIIIIIHHHHHH"
        self._parse_data(fmt, data)
        self.type_str = self._decode_value(self.type_, ET_values)


class ELFSectionHeader(ELFPart):
    """one entry of the section header table"""

    _names = [
        "name",
        "type_",
        "flags",
        "addr",
        "offset",
        "size",
        "link",
        "info",
        "addralign",
        "entsize",
    ]

    def __init__(self):
        ELFPart.__init__(self)

    def parse(self, data):
        fmt = "IIIIIIIIII"
        self._parse_data(fmt, data)
        self.flags_dec = self._decode_flags(self.flags, SHT_flags)
        self.type_str = self._decode_value(self.type_, SHT_values)


# ----- Sections -----


class ELFSection:
    """a section of the ELF file with resolved name, symbols and relocations"""

    def __init__(self, header, idx):
        self.header = header
        self.idx = idx
        self.data = None
        # resolved data
        self.name_str = None
        self.symbols = []
        self.relocations = None
        self.reloc_by_sect = {}

    def get_rela(self):
        """return a list with all relocations"""
        if self.relocations is not None:
            return self.relocations.rela
        else:
            return []

    def get_rela_by_section(self, sect):
        """return a list of relocations from the given section"""
        if sect in self.reloc_by_sect:
            return self.reloc_by_sect[sect]
        else:
            return []

    def get_rela_sections(self):
        """return the target sections of relocations, ordered by index"""
        return sorted(list(self.reloc_by_sect.keys()), key=lambda x: x.idx)

    def get_symbols(self):
        return self.symbols


class ELFSectionWithData(ELFSection):
    """a section that carries raw data bytes"""

    def __init__(self, header, index, data):
        ELFSection.__init__(self, header, index)
        self.data = data


class ELFSectionStringTable(ELFSectionWithData):
    """a string table section: maps byte offsets to strings"""

    def __init__(self, header, index, data):
        ELFSectionWithData.__init__(self, header, index, data)
        self.strtab = None

    def decode(self):
        """split the raw data at NUL bytes into (offset, bytes) entries"""
        l = len(self.data)
        o = 0
        strtab = []
        while o < l:
            n = self.data.find(b"\0", o)
            if n == -1:
                raise ELFParseError("Invalid strtab!")
            if n > 0:
                s = self.data[o:n]
            else:
                # BUGFIX: use an empty bytes value, not a str, so that all
                # strtab entries share the same (bytes) type
                s = b""
            strtab.append((o, s))
            o = n + 1
        self.strtab = strtab

    def get_string(self, off):
        """return the bytes starting at offset off (may point inside an entry)"""
        # BUGFIX: bytes seed for type consistency with the strtab entries
        old = (0, b"")
        for e in self.strtab:
            if off < e[0]:
                delta = off - old[0]
                return old[1][delta:]
            old = e
        delta = off - self.strtab[-1][0]
        return self.strtab[-1][1][delta:]
self.type_ = self.info & 0xf - self.visibility = self.other & 3 - # string values - self.bind_str = self._decode_value(self.bind, STB_values) - self.type_str = self._decode_value(self.type_, STT_values) - self.visibility_str = self._decode_value(self.visibility, STV_values) - self.shndx_str = self._decode_value(self.shndx, SHN_values) + _names = ["name", "value", "size", "info", "other", "shndx"] + + def __init__(self, idx): + ELFPart.__init__(self) + self.idx = idx + self.bind = None + self.type_ = None + self.visibility = None + # will be resolved + self.name_str = None + self.section = None + + def parse(self, data): + fmt = "IIIBBH" + self._parse_data(fmt, data) + # decode sub values + self.bind = self.info >> 4 + self.type_ = self.info & 0xF + self.visibility = self.other & 3 + # string values + self.bind_str = self._decode_value(self.bind, STB_values) + self.type_str = self._decode_value(self.type_, STT_values) + self.visibility_str = self._decode_value(self.visibility, STV_values) + self.shndx_str = self._decode_value(self.shndx, SHN_values) class ELFSectionSymbolTable(ELFSectionWithData): - def __init__(self, header, index, data): - ELFSectionWithData.__init__(self, header, index, data) - self.symtab = [] - - def decode(self): - entsize = self.header.entsize - num = self.header.size // entsize - symtab = [] - self.symtab = symtab - off = 0 - idx = 0 - for n in range(num): - entry = ELFSymbol(idx) - entry_data = self.data[off:off+entsize] - entry.parse(entry_data) - symtab.append(entry) - off += entsize - idx += 1 - return True - - def get_symbol(self, idx): - return self.symtab[idx] - - def get_table_symbols(self): - return self.symtab + def __init__(self, header, index, data): + ELFSectionWithData.__init__(self, header, index, data) + self.symtab = [] + + def decode(self): + entsize = self.header.entsize + num = self.header.size // entsize + symtab = [] + self.symtab = symtab + off = 0 + idx = 0 + for n in range(num): + entry = ELFSymbol(idx) + entry_data 
= self.data[off : off + entsize] + entry.parse(entry_data) + symtab.append(entry) + off += entsize + idx += 1 + return True + + def get_symbol(self, idx): + return self.symtab[idx] + + def get_table_symbols(self): + return self.symtab class ELFRelocationWithAddend(ELFPart): - _names = ['offset','info','addend'] + _names = ["offset", "info", "addend"] - def __init__(self): - ELFPart.__init__(self) - self.sym = None - self.type_ = None - self.type_str = None - self.symbol = None + def __init__(self): + ELFPart.__init__(self) + self.sym = None + self.type_ = None + self.type_str = None + self.symbol = None - def parse(self, data): - fmt = "IIi" - self._parse_data(fmt, data) - # decode sym and type - self.sym = self.info >> 8 - self.type_ = self.info & 0xff - self.type_str = self._decode_value(self.type_, R_68K_values) + def parse(self, data): + fmt = "IIi" + self._parse_data(fmt, data) + # decode sym and type + self.sym = self.info >> 8 + self.type_ = self.info & 0xFF + self.type_str = self._decode_value(self.type_, R_68K_values) class ELFSectionRelocationsWithAddend(ELFSectionWithData): - def __init__(self, header, index, data): - ELFSectionWithData.__init__(self, header, index, data) - self.rela = [] - self.symtab = None - self.reloc_section = None - - def decode(self): - entsize = self.header.entsize - num = self.header.size // entsize - rela = [] - self.rela = rela - off = 0 - for n in range(num): - entry = ELFRelocationWithAddend() - entry_data = self.data[off:off+entsize] - entry.parse(entry_data) - rela.append(entry) - off += entsize - - def get_relocations(self): - return self.rela + def __init__(self, header, index, data): + ELFSectionWithData.__init__(self, header, index, data) + self.rela = [] + self.symtab = None + self.reloc_section = None + + def decode(self): + entsize = self.header.entsize + num = self.header.size // entsize + rela = [] + self.rela = rela + off = 0 + for n in range(num): + entry = ELFRelocationWithAddend() + entry_data = self.data[off 
: off + entsize] + entry.parse(entry_data) + rela.append(entry) + off += entsize + + def get_relocations(self): + return self.rela class ELFFile: - def __init__(self): - self.identifier = None - self.header = None - self.section_hdrs = [] - self.sections = [] - self.symtabs = [] - self.relas = [] - - def get_section_by_name(self, name): - for sect in self.sections: - if sect.name_str == name: - return sect - return None + def __init__(self): + self.identifier = None + self.header = None + self.section_hdrs = [] + self.sections = [] + self.symtabs = [] + self.relas = [] + + def get_section_by_name(self, name): + for sect in self.sections: + if sect.name_str == name: + return sect + return None diff --git a/amitools/binfmt/elf/ELFReader.py b/amitools/binfmt/elf/ELFReader.py index d3baa2db..d2dc450a 100644 --- a/amitools/binfmt/elf/ELFReader.py +++ b/amitools/binfmt/elf/ELFReader.py @@ -7,214 +7,213 @@ class ELFReader: - - def _load_section_headers(self, f, ef): - shoff = ef.header.shoff - shentsize = ef.header.shentsize - f.seek(shoff, os.SEEK_SET) - shnum = ef.header.shnum - for i in range(shnum): - sh = ELFSectionHeader() - sh_data = f.read(shentsize) - sh.parse(sh_data) - ef.section_hdrs.append(sh) - - def _load_sections(self, f, ef): - sect_hdrs = ef.section_hdrs - idx = 0 - for sect_hdr in sect_hdrs: - idx += 1 - sect = self._load_section(f, sect_hdr, idx) - ef.sections.append(sect) - - def _load_section(self, f, sect_hdr, idx): - t = sect_hdr.type_ - size = sect_hdr.size - if t == SHT_NOBITS or size == 0: - sect = ELFSection(sect_hdr, idx) - else: - # read data - offset = sect_hdr.offset - f.seek(offset, os.SEEK_SET) - data = f.read(size) - # decode? 
- if t == SHT_STRTAB: - sect = ELFSectionStringTable(sect_hdr, idx, data) - sect.decode() - elif t == SHT_SYMTAB: - sect = ELFSectionSymbolTable(sect_hdr, idx, data) - sect.decode() - elif t == SHT_RELA: - sect = ELFSectionRelocationsWithAddend(sect_hdr, idx, data) - sect.decode() - else: - sect = ELFSectionWithData(sect_hdr, idx, data) - - return sect - - def _name_section(self, section, strtab): - off = section.header.name - section.name_str = strtab.get_string(off) - - def _resolve_symtab_names(self, sect, sections): - # get linked string table - strtab_seg_num = sect.header.link - if strtab_seg_num < 1 or strtab_seg_num >= len(sections): - raise ELFParseError("Invalid strtab for symtab: "+strtab_seg_num) - strtab = sections[strtab_seg_num] - if strtab.__class__ != ELFSectionStringTable: - raise ELFParserError("Invalid strtab segment for symtab") - # resolve all symbol names - for sym in sect.symtab: - sym.name_str = strtab.get_string(sym.name) - - def _resolve_symtab_indices(self, sect, sections): - for sym in sect.symtab: - if sym.shndx_str == None: - # refers a valid section - idx = sym.shndx - sym.section = sections[idx] - - def _assign_symbols_to_sections(self, sect): - src_file_sym = None - all_symbols = [] - for sym in sect.symtab: - sym_type = sym.type_str - if sym_type == 'FILE': - # store file symbol for following symbols - src_file_sym = sym - elif sym_type in ('OBJECT','FUNC','NOTYPE'): - # add containing file symbol and its name - if src_file_sym != None: - sym.file_sym = src_file_sym + def _load_section_headers(self, f, ef): + shoff = ef.header.shoff + shentsize = ef.header.shentsize + f.seek(shoff, os.SEEK_SET) + shnum = ef.header.shnum + for i in range(shnum): + sh = ELFSectionHeader() + sh_data = f.read(shentsize) + sh.parse(sh_data) + ef.section_hdrs.append(sh) + + def _load_sections(self, f, ef): + sect_hdrs = ef.section_hdrs + idx = 0 + for sect_hdr in sect_hdrs: + idx += 1 + sect = self._load_section(f, sect_hdr, idx) + 
ef.sections.append(sect) + + def _load_section(self, f, sect_hdr, idx): + t = sect_hdr.type_ + size = sect_hdr.size + if t == SHT_NOBITS or size == 0: + sect = ELFSection(sect_hdr, idx) else: - sym.file_sym = None - # add symbol to segment - sym_sect = sym.section - if sym_sect is not None: - sym_sect.symbols.append(sym) - # list of all symbols assigned - all_symbols.append(sym_sect.symbols) - # now sort all symbol lists - for symbols in all_symbols: - symbols.sort(key=lambda x : x.value) - - - def _resolve_rela_links(self, sect, sections): - link = sect.header.link - info = sect.header.info - num_sects = len(sections) - if link == 0 or link >= num_sects: - raise ELFParseError("Invalid rela link!") - if info == 0 or info >= num_sects: - raise ELFParseError("Invalid rela info!") - - # info_seg -> src segment we will apply rela on - src_sect = sections[info] - sect.reloc_section = src_sect - - # link_seg -> symbol table - sect.symtab = sections[link] - - # store link in segment for this relocation - src_sect.relocations = sect - - # a map for rela by tgt segment - by_sect = {} - src_sect.reloc_by_sect = by_sect - - # now process all rela entries - symtab = sect.symtab - for entry in sect.rela: - # look up symbol of rela entry - sym_idx = entry.sym - sym = symtab.get_symbol(sym_idx) - entry.symbol = sym - # copy section we relocate from - entry.section = sym.section - # calc addend in segment - entry.section_addend = entry.addend + sym.value - - # clear symbol if its empty - if sym.name_str == "": - entry.symbol = None - - # add entry to section list - tgt_sect = entry.section - if tgt_sect in by_sect: - by_sect_list = by_sect[tgt_sect] - else: - by_sect_list = [] - by_sect[tgt_sect] = by_sect_list - by_sect_list.append(entry) - - # sort all by_seg entries - for sect in by_sect: - by_sect_list = by_sect[sect] - by_sect_list.sort(key=lambda x : x.offset) - - def load(self, f): - """load an ELF file from the given file object f + # read data + offset = sect_hdr.offset + 
f.seek(offset, os.SEEK_SET) + data = f.read(size) + # decode? + if t == SHT_STRTAB: + sect = ELFSectionStringTable(sect_hdr, idx, data) + sect.decode() + elif t == SHT_SYMTAB: + sect = ELFSectionSymbolTable(sect_hdr, idx, data) + sect.decode() + elif t == SHT_RELA: + sect = ELFSectionRelocationsWithAddend(sect_hdr, idx, data) + sect.decode() + else: + sect = ELFSectionWithData(sect_hdr, idx, data) + + return sect + + def _name_section(self, section, strtab): + off = section.header.name + section.name_str = strtab.get_string(off) + + def _resolve_symtab_names(self, sect, sections): + # get linked string table + strtab_seg_num = sect.header.link + if strtab_seg_num < 1 or strtab_seg_num >= len(sections): + raise ELFParseError("Invalid strtab for symtab: " + strtab_seg_num) + strtab = sections[strtab_seg_num] + if strtab.__class__ != ELFSectionStringTable: + raise ELFParserError("Invalid strtab segment for symtab") + # resolve all symbol names + for sym in sect.symtab: + sym.name_str = strtab.get_string(sym.name) + + def _resolve_symtab_indices(self, sect, sections): + for sym in sect.symtab: + if sym.shndx_str == None: + # refers a valid section + idx = sym.shndx + sym.section = sections[idx] + + def _assign_symbols_to_sections(self, sect): + src_file_sym = None + all_symbols = [] + for sym in sect.symtab: + sym_type = sym.type_str + if sym_type == "FILE": + # store file symbol for following symbols + src_file_sym = sym + elif sym_type in ("OBJECT", "FUNC", "NOTYPE"): + # add containing file symbol and its name + if src_file_sym != None: + sym.file_sym = src_file_sym + else: + sym.file_sym = None + # add symbol to segment + sym_sect = sym.section + if sym_sect is not None: + sym_sect.symbols.append(sym) + # list of all symbols assigned + all_symbols.append(sym_sect.symbols) + # now sort all symbol lists + for symbols in all_symbols: + symbols.sort(key=lambda x: x.value) + + def _resolve_rela_links(self, sect, sections): + link = sect.header.link + info = 
sect.header.info + num_sects = len(sections) + if link == 0 or link >= num_sects: + raise ELFParseError("Invalid rela link!") + if info == 0 or info >= num_sects: + raise ELFParseError("Invalid rela info!") + + # info_seg -> src segment we will apply rela on + src_sect = sections[info] + sect.reloc_section = src_sect + + # link_seg -> symbol table + sect.symtab = sections[link] + + # store link in segment for this relocation + src_sect.relocations = sect + + # a map for rela by tgt segment + by_sect = {} + src_sect.reloc_by_sect = by_sect + + # now process all rela entries + symtab = sect.symtab + for entry in sect.rela: + # look up symbol of rela entry + sym_idx = entry.sym + sym = symtab.get_symbol(sym_idx) + entry.symbol = sym + # copy section we relocate from + entry.section = sym.section + # calc addend in segment + entry.section_addend = entry.addend + sym.value + + # clear symbol if its empty + if sym.name_str == "": + entry.symbol = None + + # add entry to section list + tgt_sect = entry.section + if tgt_sect in by_sect: + by_sect_list = by_sect[tgt_sect] + else: + by_sect_list = [] + by_sect[tgt_sect] = by_sect_list + by_sect_list.append(entry) + + # sort all by_seg entries + for sect in by_sect: + by_sect_list = by_sect[sect] + by_sect_list.sort(key=lambda x: x.offset) + + def load(self, f): + """load an ELF file from the given file object f and return an ELFFile instance or None if loading failed""" - ef = ELFFile() - - # read identifier - ident = ELFIdentifier() - ident_data = f.read(16) - ident.parse(ident_data) - ef.identifier = ident - - # read header - hdr = ELFHeader() - hdr_data = f.read(36) - hdr.parse(hdr_data) - ef.header = hdr - - # expect a non-empty section header - if hdr.shnum == 0: - raise ELFParseError("No segment header defined!") - - # load all section headers - self._load_section_headers(f, ef) - - # load and decode sections - self._load_sections(f, ef) - - # get string table with segment names - strtab_idx = ef.header.shstrndx - 
strtab = ef.sections[strtab_idx] - if strtab.__class__ != ELFSectionStringTable: - raise ELFParseError("No strtab for segment header found! ") - - # process sections - for sect in ef.sections: - # name all sections by using the string table - self._name_section(sect, strtab) - # resolve symbol table names - if sect.header.type_ == SHT_SYMTAB: - # store in file symtabs - ef.symtabs.append(sect) - # get names in symtab - self._resolve_symtab_names(sect, ef.sections) - # link sections to symbols - self._resolve_symtab_indices(sect, ef.sections) - # assign symbols to sections - self._assign_symbols_to_sections(sect) - - # resolve rela links and symbols - for sect in ef.sections: - if sect.header.type_ == SHT_RELA: - self._resolve_rela_links(sect, ef.sections) - ef.relas.append(sect) - - return ef + ef = ELFFile() + + # read identifier + ident = ELFIdentifier() + ident_data = f.read(16) + ident.parse(ident_data) + ef.identifier = ident + + # read header + hdr = ELFHeader() + hdr_data = f.read(36) + hdr.parse(hdr_data) + ef.header = hdr + + # expect a non-empty section header + if hdr.shnum == 0: + raise ELFParseError("No segment header defined!") + + # load all section headers + self._load_section_headers(f, ef) + + # load and decode sections + self._load_sections(f, ef) + + # get string table with segment names + strtab_idx = ef.header.shstrndx + strtab = ef.sections[strtab_idx] + if strtab.__class__ != ELFSectionStringTable: + raise ELFParseError("No strtab for segment header found! 
") + + # process sections + for sect in ef.sections: + # name all sections by using the string table + self._name_section(sect, strtab) + # resolve symbol table names + if sect.header.type_ == SHT_SYMTAB: + # store in file symtabs + ef.symtabs.append(sect) + # get names in symtab + self._resolve_symtab_names(sect, ef.sections) + # link sections to symbols + self._resolve_symtab_indices(sect, ef.sections) + # assign symbols to sections + self._assign_symbols_to_sections(sect) + + # resolve rela links and symbols + for sect in ef.sections: + if sect.header.type_ == SHT_RELA: + self._resolve_rela_links(sect, ef.sections) + ef.relas.append(sect) + + return ef + # mini test -if __name__ == '__main__': - import sys - reader = ELFReader() - for a in sys.argv[1:]: - f = open(a, "rb") - ef = reader.load(f) +if __name__ == "__main__": + import sys + reader = ELFReader() + for a in sys.argv[1:]: + f = open(a, "rb") + ef = reader.load(f) diff --git a/amitools/binfmt/hunk/BinFmtHunk.py b/amitools/binfmt/hunk/BinFmtHunk.py index 03573845..47339456 100644 --- a/amitools/binfmt/hunk/BinFmtHunk.py +++ b/amitools/binfmt/hunk/BinFmtHunk.py @@ -1,223 +1,223 @@ - - from amitools.binfmt.BinImage import * from .HunkBlockFile import HunkBlockFile, HunkParseError from .HunkLoadSegFile import HunkLoadSegFile, HunkSegment from .HunkDebug import * from . 
import Hunk + class BinFmtHunk: - """Handle Amiga's native Hunk file format""" - - def is_image(self, path): - """check if a given file is a hunk LoadSeg file""" - with open(path, "rb") as f: - return self.is_image_fobj(f) - - def is_image_fobj(self, fobj): - """check if a given fobj is a hunk LoadSeg file""" - bf = HunkBlockFile() - bf_type = bf.peek_type(fobj) - return bf_type == Hunk.TYPE_LOADSEG - - def load_image(self, path): - """load a BinImage from a hunk file given via path""" - with open(path, "rb") as f: - return self.load_image_fobj(f) - - def load_image_fobj(self, fobj): - """load a BinImage from a hunk file given via file obj""" - # read the hunk blocks - bf = HunkBlockFile() - bf.read(fobj, isLoadSeg=True) - # derive load seg file - lsf = HunkLoadSegFile() - lsf.parse_block_file(bf) - # convert load seg file - return self.create_image_from_load_seg_file(lsf) - - def save_image(self, path, bin_img): - """save a BinImage to a hunk file given via path""" - with open(path, "wb") as f: - self.save_image_fobj(f, bin_img) - - def save_image_fobj(self, fobj, bin_img): - """save a BinImage to a hunk file given via file obj""" - lsf = self.create_load_seg_file_from_image(bin_img) - bf = lsf.create_block_file() - bf.write(fobj, isLoadSeg=True) - - def create_load_seg_file_from_image(self, bin_img): - """create a HunkLodSegFile from a BinImage""" - lsf = HunkLoadSegFile() - for seg in bin_img.segments: - seg_type = seg.get_type() - # create HunkSegment - lseg = HunkSegment() - lsf.add_segment(lseg) - if seg_type == SEGMENT_TYPE_CODE: - lseg.setup_code(seg.data) - elif seg_type == SEGMENT_TYPE_DATA: - lseg.setup_data(seg.data) - elif seg_type == SEGMENT_TYPE_BSS: - lseg.setup_bss(seg.size) - else: - raise HunkParseError("Unknown Segment Type in BinImage: %d" % seg_type) - # add relocs - self._add_bin_img_relocs(lseg, seg) - # add symbols - self._add_bin_img_symbols(lseg, seg) - # add debug info - self._add_bin_img_debug_info(lseg, seg) - return lsf - - def 
_add_bin_img_relocs(self, hunk_seg, seg): - reloc_segs = seg.get_reloc_to_segs() - hunk_relocs = [] - for reloc_seg in reloc_segs: - seg_id = reloc_seg.id - reloc = seg.get_reloc(reloc_seg) - relocs = reloc.get_relocs() - offsets = [] - for r in relocs: - if r.get_width() != 2 or r.get_addend() != 0: - raise HunkParseError("Invalid reloc: " + r) - offsets.append(r.get_offset()) - hunk_relocs.append((seg_id, offsets)) - if len(hunk_relocs) > 0: - hunk_seg.setup_relocs(hunk_relocs) - - def _add_bin_img_symbols(self, hunk_seg, seg): - sym_tab = seg.get_symtab() - if sym_tab is not None: - hunk_sym_list = [] - for sym in sym_tab.get_symbols(): - hunk_sym_list.append((sym.get_name(), sym.get_offset())) - hunk_seg.setup_symbols(hunk_sym_list) - - def _add_bin_img_debug_info(self, hunk_seg, seg): - debug_line = seg.get_debug_line() - if debug_line is not None: - for file in debug_line.get_files(): - src_file = file.get_src_file() - base_offset = file.get_base_offset() - dl = HunkDebugLine(src_file, base_offset) - for e in file.get_entries(): - offset = e.get_offset() - src_line = e.get_src_line() - flags = e.get_flags() - hunk_src_line = src_line | (flags << 24) - dl.add_entry(offset, hunk_src_line) - hunk_seg.setup_debug(dl) - - def create_image_from_load_seg_file(self, lsf): - """create a BinImage from a HunkLoadSegFile object""" - bi = BinImage(BIN_IMAGE_TYPE_HUNK) - bi.set_file_data(lsf) - segs = lsf.get_segments() - for seg in segs: - # what type of segment to we have? 
- blk_id = seg.seg_blk.blk_id - size = seg.size_longs * 4 - data = seg.seg_blk.data - if blk_id == Hunk.HUNK_CODE: - seg_type = SEGMENT_TYPE_CODE - elif blk_id == Hunk.HUNK_DATA: - seg_type = SEGMENT_TYPE_DATA - elif blk_id == Hunk.HUNK_BSS: - seg_type = SEGMENT_TYPE_BSS - else: - raise HunkParseError("Unknown Segment Type for BinImage: %d" % blk_id) - # create seg - bs = Segment(seg_type, size, data) - bs.set_file_data(seg) - bi.add_segment(bs) - # add relocations if any - bi_segs = bi.get_segments() - for seg in bi_segs: - # add relocations? - hseg = seg.file_data - reloc_blks = hseg.reloc_blks - if reloc_blks is not None: - self._add_hunk_relocs(reloc_blks, seg, bi_segs) - # add symbol table - symbol_blk = hseg.symbol_blk - if symbol_blk is not None: - self._add_hunk_symbols(symbol_blk, seg) - # add debug infos - debug_infos = hseg.debug_infos - if debug_infos is not None: - self._add_debug_infos(debug_infos, seg) - - return bi - - def _add_hunk_relocs(self, blks, seg, all_segs): - """add relocations to a segment""" - for blk in blks: - if blk.blk_id not in (Hunk.HUNK_ABSRELOC32, Hunk.HUNK_RELOC32SHORT): - raise HunkParseError("Invalid Relocations for BinImage: %d" % blk_id) - relocs = blk.relocs - for r in relocs: - hunk_num = r[0] - offsets = r[1] - to_seg = all_segs[hunk_num] - # create reloc for target segment or reuse one. 
- rl = seg.get_reloc(to_seg) - if rl == None: - rl = Relocations(to_seg) - # add offsets - for o in offsets: - r = Reloc(o) - rl.add_reloc(r) - seg.add_reloc(to_seg, rl) - - def _add_hunk_symbols(self, blk, seg): - """add symbols to segment""" - syms = blk.symbols - if len(syms) == 0: - return - st = SymbolTable() - seg.set_symtab(st) - for sym in syms: - name = sym[0] - offset = sym[1] - symbol = Symbol(offset, name) - st.add_symbol(symbol) - - def _add_debug_infos(self, debug_infos, seg): - dl = DebugLine() - seg.set_debug_line(dl) - for debug_info in debug_infos: - # add source line infos - if isinstance(debug_info, HunkDebugLine): - src_file = debug_info.src_file - # abs path? - pos = src_file.rfind('/') - if pos != -1: - dir_name = src_file[:pos] - src_file = src_file[pos+1:] - else: - dir_name = "" - base_offset = debug_info.base_offset - df = DebugLineFile(src_file, dir_name, base_offset) - dl.add_file(df) - for entry in debug_info.get_entries(): - off = entry.offset - src_line = entry.src_line & 0xffffff - flags = (entry.src_line & 0xff000000) >> 24 - e = DebugLineEntry(off, src_line, flags) - df.add_entry(e) + """Handle Amiga's native Hunk file format""" + + def is_image(self, path): + """check if a given file is a hunk LoadSeg file""" + with open(path, "rb") as f: + return self.is_image_fobj(f) + + def is_image_fobj(self, fobj): + """check if a given fobj is a hunk LoadSeg file""" + bf = HunkBlockFile() + bf_type = bf.peek_type(fobj) + return bf_type == Hunk.TYPE_LOADSEG + + def load_image(self, path): + """load a BinImage from a hunk file given via path""" + with open(path, "rb") as f: + return self.load_image_fobj(f) + + def load_image_fobj(self, fobj): + """load a BinImage from a hunk file given via file obj""" + # read the hunk blocks + bf = HunkBlockFile() + bf.read(fobj, isLoadSeg=True) + # derive load seg file + lsf = HunkLoadSegFile() + lsf.parse_block_file(bf) + # convert load seg file + return self.create_image_from_load_seg_file(lsf) + + def 
save_image(self, path, bin_img): + """save a BinImage to a hunk file given via path""" + with open(path, "wb") as f: + self.save_image_fobj(f, bin_img) + + def save_image_fobj(self, fobj, bin_img): + """save a BinImage to a hunk file given via file obj""" + lsf = self.create_load_seg_file_from_image(bin_img) + bf = lsf.create_block_file() + bf.write(fobj, isLoadSeg=True) + + def create_load_seg_file_from_image(self, bin_img): + """create a HunkLodSegFile from a BinImage""" + lsf = HunkLoadSegFile() + for seg in bin_img.segments: + seg_type = seg.get_type() + # create HunkSegment + lseg = HunkSegment() + lsf.add_segment(lseg) + if seg_type == SEGMENT_TYPE_CODE: + lseg.setup_code(seg.data) + elif seg_type == SEGMENT_TYPE_DATA: + lseg.setup_data(seg.data) + elif seg_type == SEGMENT_TYPE_BSS: + lseg.setup_bss(seg.size) + else: + raise HunkParseError("Unknown Segment Type in BinImage: %d" % seg_type) + # add relocs + self._add_bin_img_relocs(lseg, seg) + # add symbols + self._add_bin_img_symbols(lseg, seg) + # add debug info + self._add_bin_img_debug_info(lseg, seg) + return lsf + + def _add_bin_img_relocs(self, hunk_seg, seg): + reloc_segs = seg.get_reloc_to_segs() + hunk_relocs = [] + for reloc_seg in reloc_segs: + seg_id = reloc_seg.id + reloc = seg.get_reloc(reloc_seg) + relocs = reloc.get_relocs() + offsets = [] + for r in relocs: + if r.get_width() != 2 or r.get_addend() != 0: + raise HunkParseError("Invalid reloc: " + r) + offsets.append(r.get_offset()) + hunk_relocs.append((seg_id, offsets)) + if len(hunk_relocs) > 0: + hunk_seg.setup_relocs(hunk_relocs) + + def _add_bin_img_symbols(self, hunk_seg, seg): + sym_tab = seg.get_symtab() + if sym_tab is not None: + hunk_sym_list = [] + for sym in sym_tab.get_symbols(): + hunk_sym_list.append((sym.get_name(), sym.get_offset())) + hunk_seg.setup_symbols(hunk_sym_list) + + def _add_bin_img_debug_info(self, hunk_seg, seg): + debug_line = seg.get_debug_line() + if debug_line is not None: + for file in 
debug_line.get_files(): + src_file = file.get_src_file() + base_offset = file.get_base_offset() + dl = HunkDebugLine(src_file, base_offset) + for e in file.get_entries(): + offset = e.get_offset() + src_line = e.get_src_line() + flags = e.get_flags() + hunk_src_line = src_line | (flags << 24) + dl.add_entry(offset, hunk_src_line) + hunk_seg.setup_debug(dl) + + def create_image_from_load_seg_file(self, lsf): + """create a BinImage from a HunkLoadSegFile object""" + bi = BinImage(BIN_IMAGE_TYPE_HUNK) + bi.set_file_data(lsf) + segs = lsf.get_segments() + for seg in segs: + # what type of segment to we have? + blk_id = seg.seg_blk.blk_id + size = seg.size_longs * 4 + data = seg.seg_blk.data + if blk_id == Hunk.HUNK_CODE: + seg_type = SEGMENT_TYPE_CODE + elif blk_id == Hunk.HUNK_DATA: + seg_type = SEGMENT_TYPE_DATA + elif blk_id == Hunk.HUNK_BSS: + seg_type = SEGMENT_TYPE_BSS + else: + raise HunkParseError("Unknown Segment Type for BinImage: %d" % blk_id) + # create seg + bs = Segment(seg_type, size, data) + bs.set_file_data(seg) + bi.add_segment(bs) + # add relocations if any + bi_segs = bi.get_segments() + for seg in bi_segs: + # add relocations? + hseg = seg.file_data + reloc_blks = hseg.reloc_blks + if reloc_blks is not None: + self._add_hunk_relocs(reloc_blks, seg, bi_segs) + # add symbol table + symbol_blk = hseg.symbol_blk + if symbol_blk is not None: + self._add_hunk_symbols(symbol_blk, seg) + # add debug infos + debug_infos = hseg.debug_infos + if debug_infos is not None: + self._add_debug_infos(debug_infos, seg) + + return bi + + def _add_hunk_relocs(self, blks, seg, all_segs): + """add relocations to a segment""" + for blk in blks: + if blk.blk_id not in (Hunk.HUNK_ABSRELOC32, Hunk.HUNK_RELOC32SHORT): + raise HunkParseError("Invalid Relocations for BinImage: %d" % blk_id) + relocs = blk.relocs + for r in relocs: + hunk_num = r[0] + offsets = r[1] + to_seg = all_segs[hunk_num] + # create reloc for target segment or reuse one. 
+ rl = seg.get_reloc(to_seg) + if rl == None: + rl = Relocations(to_seg) + # add offsets + for o in offsets: + r = Reloc(o) + rl.add_reloc(r) + seg.add_reloc(to_seg, rl) + + def _add_hunk_symbols(self, blk, seg): + """add symbols to segment""" + syms = blk.symbols + if len(syms) == 0: + return + st = SymbolTable() + seg.set_symtab(st) + for sym in syms: + name = sym[0] + offset = sym[1] + symbol = Symbol(offset, name) + st.add_symbol(symbol) + + def _add_debug_infos(self, debug_infos, seg): + dl = DebugLine() + seg.set_debug_line(dl) + for debug_info in debug_infos: + # add source line infos + if isinstance(debug_info, HunkDebugLine): + src_file = debug_info.src_file + # abs path? + pos = src_file.rfind("/") + if pos != -1: + dir_name = src_file[:pos] + src_file = src_file[pos + 1 :] + else: + dir_name = "" + base_offset = debug_info.base_offset + df = DebugLineFile(src_file, dir_name, base_offset) + dl.add_file(df) + for entry in debug_info.get_entries(): + off = entry.offset + src_line = entry.src_line & 0xFFFFFF + flags = (entry.src_line & 0xFF000000) >> 24 + e = DebugLineEntry(off, src_line, flags) + df.add_entry(e) # mini test -if __name__ == '__main__': - import sys - bf = BinFmtHunk() - for a in sys.argv[1:]: - if bf.is_image(a): - print("loading", a) - bi = bf.load_image(a) - print(bi) - bf.save_image("a.out", bi) - else: - print("NO HUNK:", a) +if __name__ == "__main__": + import sys + + bf = BinFmtHunk() + for a in sys.argv[1:]: + if bf.is_image(a): + print("loading", a) + bi = bf.load_image(a) + print(bi) + bf.save_image("a.out", bi) + else: + print("NO HUNK:", a) diff --git a/amitools/binfmt/hunk/Hunk.py b/amitools/binfmt/hunk/Hunk.py index cb23e751..593d3312 100644 --- a/amitools/binfmt/hunk/Hunk.py +++ b/amitools/binfmt/hunk/Hunk.py @@ -1,149 +1,139 @@ """General definitions for the AmigaOS Hunk format""" -HUNK_UNIT = 999 -HUNK_NAME = 1000 -HUNK_CODE = 1001 -HUNK_DATA = 1002 -HUNK_BSS = 1003 +HUNK_UNIT = 999 +HUNK_NAME = 1000 +HUNK_CODE = 1001 
+HUNK_DATA = 1002 +HUNK_BSS = 1003 HUNK_ABSRELOC32 = 1004 HUNK_RELRELOC16 = 1005 -HUNK_RELRELOC8 = 1006 -HUNK_EXT = 1007 -HUNK_SYMBOL = 1008 -HUNK_DEBUG = 1009 -HUNK_END = 1010 -HUNK_HEADER = 1011 - -HUNK_OVERLAY = 1013 -HUNK_BREAK = 1014 -HUNK_DREL32 = 1015 -HUNK_DREL16 = 1016 -HUNK_DREL8 = 1017 -HUNK_LIB = 1018 -HUNK_INDEX = 1019 +HUNK_RELRELOC8 = 1006 +HUNK_EXT = 1007 +HUNK_SYMBOL = 1008 +HUNK_DEBUG = 1009 +HUNK_END = 1010 +HUNK_HEADER = 1011 + +HUNK_OVERLAY = 1013 +HUNK_BREAK = 1014 +HUNK_DREL32 = 1015 +HUNK_DREL16 = 1016 +HUNK_DREL8 = 1017 +HUNK_LIB = 1018 +HUNK_INDEX = 1019 HUNK_RELOC32SHORT = 1020 HUNK_RELRELOC32 = 1021 HUNK_ABSRELOC16 = 1022 -HUNK_PPC_CODE = 1257 +HUNK_PPC_CODE = 1257 HUNK_RELRELOC26 = 1260 hunk_names = { -HUNK_UNIT : "HUNK_UNIT", -HUNK_NAME : "HUNK_NAME", -HUNK_CODE : "HUNK_CODE", -HUNK_DATA : "HUNK_DATA", -HUNK_BSS : "HUNK_BSS", -HUNK_ABSRELOC32 : "HUNK_ABSRELOC32", -HUNK_RELRELOC16 : "HUNK_RELRELOC16", -HUNK_RELRELOC8 : "HUNK_RELRELOC8", -HUNK_EXT : "HUNK_EXT", -HUNK_SYMBOL : "HUNK_SYMBOL", -HUNK_DEBUG : "HUNK_DEBUG", -HUNK_END : "HUNK_END", -HUNK_HEADER : "HUNK_HEADER", -HUNK_OVERLAY : "HUNK_OVERLAY", -HUNK_BREAK : "HUNK_BREAK", -HUNK_DREL32 : "HUNK_DREL32", -HUNK_DREL16 : "HUNK_DREL16", -HUNK_DREL8 : "HUNK_DREL8", -HUNK_LIB : "HUNK_LIB", -HUNK_INDEX : "HUNK_INDEX", -HUNK_RELOC32SHORT : "HUNK_RELOC32SHORT", -HUNK_RELRELOC32 : "HUNK_RELRELOC32", -HUNK_ABSRELOC16 : "HUNK_ABSRELOC16", -HUNK_PPC_CODE : "HUNK_PPC_CODE", -HUNK_RELRELOC26 : "HUNK_RELRELOC26", + HUNK_UNIT: "HUNK_UNIT", + HUNK_NAME: "HUNK_NAME", + HUNK_CODE: "HUNK_CODE", + HUNK_DATA: "HUNK_DATA", + HUNK_BSS: "HUNK_BSS", + HUNK_ABSRELOC32: "HUNK_ABSRELOC32", + HUNK_RELRELOC16: "HUNK_RELRELOC16", + HUNK_RELRELOC8: "HUNK_RELRELOC8", + HUNK_EXT: "HUNK_EXT", + HUNK_SYMBOL: "HUNK_SYMBOL", + HUNK_DEBUG: "HUNK_DEBUG", + HUNK_END: "HUNK_END", + HUNK_HEADER: "HUNK_HEADER", + HUNK_OVERLAY: "HUNK_OVERLAY", + HUNK_BREAK: "HUNK_BREAK", + HUNK_DREL32: "HUNK_DREL32", + HUNK_DREL16: 
"HUNK_DREL16", + HUNK_DREL8: "HUNK_DREL8", + HUNK_LIB: "HUNK_LIB", + HUNK_INDEX: "HUNK_INDEX", + HUNK_RELOC32SHORT: "HUNK_RELOC32SHORT", + HUNK_RELRELOC32: "HUNK_RELRELOC32", + HUNK_ABSRELOC16: "HUNK_ABSRELOC16", + HUNK_PPC_CODE: "HUNK_PPC_CODE", + HUNK_RELRELOC26: "HUNK_RELRELOC26", } -loadseg_valid_begin_hunks = [ -HUNK_CODE, -HUNK_DATA, -HUNK_BSS, -HUNK_PPC_CODE -] +loadseg_valid_begin_hunks = [HUNK_CODE, HUNK_DATA, HUNK_BSS, HUNK_PPC_CODE] loadseg_valid_extra_hunks = [ -HUNK_ABSRELOC32, -HUNK_RELOC32SHORT, -HUNK_DEBUG, -HUNK_SYMBOL, -HUNK_NAME + HUNK_ABSRELOC32, + HUNK_RELOC32SHORT, + HUNK_DEBUG, + HUNK_SYMBOL, + HUNK_NAME, ] -unit_valid_main_hunks = [ -HUNK_CODE, -HUNK_DATA, -HUNK_BSS, -HUNK_PPC_CODE -] +unit_valid_main_hunks = [HUNK_CODE, HUNK_DATA, HUNK_BSS, HUNK_PPC_CODE] unit_valid_extra_hunks = [ -HUNK_DEBUG, -HUNK_SYMBOL, -HUNK_NAME, -HUNK_EXT, -HUNK_ABSRELOC32, -HUNK_RELRELOC16, -HUNK_RELRELOC8, -HUNK_DREL32, -HUNK_DREL16, -HUNK_DREL8, -HUNK_RELOC32SHORT, -HUNK_RELRELOC32, -HUNK_ABSRELOC16, -HUNK_RELRELOC26, + HUNK_DEBUG, + HUNK_SYMBOL, + HUNK_NAME, + HUNK_EXT, + HUNK_ABSRELOC32, + HUNK_RELRELOC16, + HUNK_RELRELOC8, + HUNK_DREL32, + HUNK_DREL16, + HUNK_DREL8, + HUNK_RELOC32SHORT, + HUNK_RELRELOC32, + HUNK_ABSRELOC16, + HUNK_RELRELOC26, ] reloc_hunks = [ -HUNK_ABSRELOC32, -HUNK_RELRELOC16, -HUNK_RELRELOC8, -HUNK_DREL32, -HUNK_DREL16, -HUNK_DREL8, -HUNK_RELOC32SHORT, -HUNK_RELRELOC32, -HUNK_ABSRELOC16, -HUNK_RELRELOC26, + HUNK_ABSRELOC32, + HUNK_RELRELOC16, + HUNK_RELRELOC8, + HUNK_DREL32, + HUNK_DREL16, + HUNK_DREL8, + HUNK_RELOC32SHORT, + HUNK_RELRELOC32, + HUNK_ABSRELOC16, + HUNK_RELRELOC26, ] -EXT_SYMB = 0 -EXT_DEF = 1 -EXT_ABS = 2 -EXT_RES = 3 -EXT_ABSREF32 = 129 -EXT_ABSCOMMON = 130 -EXT_RELREF16 = 131 -EXT_RELREF8 = 132 -EXT_DEXT32 = 133 -EXT_DEXT16 = 134 -EXT_DEXT8 = 135 -EXT_RELREF32 = 136 -EXT_RELCOMMON = 137 -EXT_ABSREF16 = 138 -EXT_ABSREF8 = 139 -EXT_RELREF26 = 229 +EXT_SYMB = 0 +EXT_DEF = 1 +EXT_ABS = 2 +EXT_RES = 3 +EXT_ABSREF32 = 129 
+EXT_ABSCOMMON = 130 +EXT_RELREF16 = 131 +EXT_RELREF8 = 132 +EXT_DEXT32 = 133 +EXT_DEXT16 = 134 +EXT_DEXT8 = 135 +EXT_RELREF32 = 136 +EXT_RELCOMMON = 137 +EXT_ABSREF16 = 138 +EXT_ABSREF8 = 139 +EXT_RELREF26 = 229 ext_names = { -EXT_SYMB : 'EXT_SYMB', -EXT_DEF : 'EXT_DEF', -EXT_ABS : 'EXT_ABS', -EXT_RES : 'EXT_RES', -EXT_ABSREF32 : 'EXT_ABSREF32', -EXT_ABSCOMMON : 'EXT_ABSCOMMON', -EXT_RELREF16 : 'EXT_RELREF16', -EXT_RELREF8 : 'EXT_RELREF8', -EXT_DEXT32 : 'EXT_DEXT32', -EXT_DEXT16 : 'EXT_DEXT16', -EXT_DEXT8 : 'EXT_DEXT8', -EXT_RELREF32 : 'EXT_RELREF32', -EXT_RELCOMMON : 'EXT_RELCOMMON', -EXT_ABSREF16 : 'EXT_ABSREF16', -EXT_ABSREF8 : 'EXT_ABSREF8', -EXT_RELREF26 : 'EXT_RELREF26' + EXT_SYMB: "EXT_SYMB", + EXT_DEF: "EXT_DEF", + EXT_ABS: "EXT_ABS", + EXT_RES: "EXT_RES", + EXT_ABSREF32: "EXT_ABSREF32", + EXT_ABSCOMMON: "EXT_ABSCOMMON", + EXT_RELREF16: "EXT_RELREF16", + EXT_RELREF8: "EXT_RELREF8", + EXT_DEXT32: "EXT_DEXT32", + EXT_DEXT16: "EXT_DEXT16", + EXT_DEXT8: "EXT_DEXT8", + EXT_RELREF32: "EXT_RELREF32", + EXT_RELCOMMON: "EXT_RELCOMMON", + EXT_ABSREF16: "EXT_ABSREF16", + EXT_ABSREF8: "EXT_ABSREF8", + EXT_RELREF26: "EXT_RELREF26", } EXT_TYPE_SHIFT = 24 -EXT_TYPE_SIZE_MASK = 0xffffff +EXT_TYPE_SIZE_MASK = 0xFFFFFF RESULT_OK = 0 RESULT_NO_HUNK_FILE = 1 @@ -151,28 +141,28 @@ RESULT_UNSUPPORTED_HUNKS = 3 result_names = { -RESULT_OK : "RESULT_OK", -RESULT_NO_HUNK_FILE : "RESULT_NO_HUNK_FILE", -RESULT_INVALID_HUNK_FILE : "RESULT_INVALID_HUNK_FILE", -RESULT_UNSUPPORTED_HUNKS : "RESULT_UNSUPPORTED_HUNKS" + RESULT_OK: "RESULT_OK", + RESULT_NO_HUNK_FILE: "RESULT_NO_HUNK_FILE", + RESULT_INVALID_HUNK_FILE: "RESULT_INVALID_HUNK_FILE", + RESULT_UNSUPPORTED_HUNKS: "RESULT_UNSUPPORTED_HUNKS", } -HUNKF_ADVISORY = 1<<29 -HUNKF_CHIP = 1<<30 -HUNKF_FAST = 1<<31 -HUNKF_ALL = (HUNKF_ADVISORY | HUNKF_CHIP | HUNKF_FAST) +HUNKF_ADVISORY = 1 << 29 +HUNKF_CHIP = 1 << 30 +HUNKF_FAST = 1 << 31 +HUNKF_ALL = HUNKF_ADVISORY | HUNKF_CHIP | HUNKF_FAST -HUNK_TYPE_MASK = 0xffff -HUNK_FLAGS_MASK = 
0xffff0000 +HUNK_TYPE_MASK = 0xFFFF +HUNK_FLAGS_MASK = 0xFFFF0000 -TYPE_UNKNOWN = 0 -TYPE_LOADSEG = 1 -TYPE_UNIT = 2 -TYPE_LIB = 3 +TYPE_UNKNOWN = 0 +TYPE_LOADSEG = 1 +TYPE_UNIT = 2 +TYPE_LIB = 3 type_names = { - TYPE_UNKNOWN: 'TYPE_UNKNOWN', - TYPE_LOADSEG: 'TYPE_LOADSEG', - TYPE_UNIT: 'TYPE_UNIT', - TYPE_LIB: 'TYPE_LIB' + TYPE_UNKNOWN: "TYPE_UNKNOWN", + TYPE_LOADSEG: "TYPE_LOADSEG", + TYPE_UNIT: "TYPE_UNIT", + TYPE_LIB: "TYPE_LIB", } diff --git a/amitools/binfmt/hunk/HunkBlockFile.py b/amitools/binfmt/hunk/HunkBlockFile.py index f394ca82..450a377b 100644 --- a/amitools/binfmt/hunk/HunkBlockFile.py +++ b/amitools/binfmt/hunk/HunkBlockFile.py @@ -1,769 +1,790 @@ """The hunk block types defined as data classes""" - import struct from .Hunk import * class HunkParseError(Exception): - def __init__(self, msg): - self.msg = msg + def __init__(self, msg): + self.msg = msg - def __str__(self): - return self.msg + def __str__(self): + return self.msg class HunkBlock: - """Base class for all hunk block types""" - - blk_id = 0xdeadbeef - sub_offset = None # used inside LIB - - def _read_long(self, f): - """read a 4 byte long""" - data = f.read(4) - if len(data) != 4: - raise HunkParseError("read_long failed") - return struct.unpack(">I",data)[0] - - def _read_word(self, f): - """read a 2 byte word""" - data = f.read(2) - if len(data) != 2: - raise HunkParseError("read_word failed") - return struct.unpack(">H",data)[0] - - def _read_name(self, f): - """read name stored in longs + """Base class for all hunk block types""" + + blk_id = 0xDEADBEEF + sub_offset = None # used inside LIB + + def _read_long(self, f): + """read a 4 byte long""" + data = f.read(4) + if len(data) != 4: + raise HunkParseError("read_long failed") + return struct.unpack(">I", data)[0] + + def _read_word(self, f): + """read a 2 byte word""" + data = f.read(2) + if len(data) != 2: + raise HunkParseError("read_word failed") + return struct.unpack(">H", data)[0] + + def _read_name(self, f): + """read name 
stored in longs return size, string """ - num_longs = self._read_long(f) - if num_longs == 0: - return 0,"" - else: - return self._read_name_size(f, num_longs) - - def _read_name_size(self, f, num_longs): - size = (num_longs & 0xffffff) * 4 - data = f.read(size) - if len(data) < size: - return -1,None - endpos = data.find(b'\0') - if endpos == -1: - return size,data - elif endpos == 0: - return 0,"" - else: - return size,data[:endpos] - - def _write_long(self, f, v): - data = struct.pack(">I",v) - f.write(data) - - def _write_word(self, f, v): - data = struct.pack(">H",v) - f.write(data) - - def _write_name(self, f, s, tag=None): - n = len(s) - num_longs = int((n+3)/4) - b = bytearray(num_longs * 4) - if n > 0: - b[0:n] = s - if tag is not None: - num_longs |= tag << 24 - self._write_long(f, num_longs) - f.write(b) + num_longs = self._read_long(f) + if num_longs == 0: + return 0, "" + else: + return self._read_name_size(f, num_longs) + + def _read_name_size(self, f, num_longs): + size = (num_longs & 0xFFFFFF) * 4 + data = f.read(size) + if len(data) < size: + return -1, None + endpos = data.find(b"\0") + if endpos == -1: + return size, data + elif endpos == 0: + return 0, "" + else: + return size, data[:endpos] + + def _write_long(self, f, v): + data = struct.pack(">I", v) + f.write(data) + + def _write_word(self, f, v): + data = struct.pack(">H", v) + f.write(data) + + def _write_name(self, f, s, tag=None): + n = len(s) + num_longs = int((n + 3) / 4) + b = bytearray(num_longs * 4) + if n > 0: + b[0:n] = s + if tag is not None: + num_longs |= tag << 24 + self._write_long(f, num_longs) + f.write(b) class HunkHeaderBlock(HunkBlock): - """HUNK_HEADER - header block of Load Modules""" - - blk_id = HUNK_HEADER - - def __init__(self): - self.reslib_names = [] - self.table_size = 0 - self.first_hunk = 0 - self.last_hunk = 0 - self.hunk_table = [] - - def setup(self, hunk_sizes): - # easy setup for given number of hunks - n = len(hunk_sizes) - if n == 0: - raise 
HunkParseError("No hunks for HUNK_HEADER given") - self.table_size = n - self.first_hunk = 0 - self.last_hunk = n-1 - self.hunk_table = hunk_sizes - - def parse(self, f): - # parse resident library names (AOS 1.x only) - while True: - l,s = self._read_name(f) - if l < 0: - raise HunkParseError("Error parsing HUNK_HEADER names") - elif l == 0: - break - self.reslib_names.append(s) - - # table size and hunk range - self.table_size = self._read_long(f) - self.first_hunk = self._read_long(f) - self.last_hunk = self._read_long(f) - if self.table_size < 0 or self.first_hunk < 0 or self.last_hunk < 0: - raise HunkParseError("HUNK_HEADER invalid table_size or first_hunk or last_hunk") - - # determine number of hunks in size table - num_hunks = self.last_hunk - self.first_hunk + 1 - for a in range(num_hunks): - hunk_size = self._read_long(f) - if hunk_size < 0: - raise HunkParseError("HUNK_HEADER contains invalid hunk_size") - # note that the upper bits are the target memory type. We only have FAST, - # so let's forget about them for a moment. 
- self.hunk_table.append(hunk_size & 0x3fffffff) - - def write(self, f): - # write residents - for reslib in self.reslib_names: - self._write_name(f, reslib) - self._write_long(f, 0) - # table size and hunk range - self._write_long(f, self.table_size) - self._write_long(f, self.first_hunk) - self._write_long(f, self.last_hunk) - # sizes - for hunk_size in self.hunk_table: - self._write_long(f, hunk_size) + """HUNK_HEADER - header block of Load Modules""" + + blk_id = HUNK_HEADER + + def __init__(self): + self.reslib_names = [] + self.table_size = 0 + self.first_hunk = 0 + self.last_hunk = 0 + self.hunk_table = [] + + def setup(self, hunk_sizes): + # easy setup for given number of hunks + n = len(hunk_sizes) + if n == 0: + raise HunkParseError("No hunks for HUNK_HEADER given") + self.table_size = n + self.first_hunk = 0 + self.last_hunk = n - 1 + self.hunk_table = hunk_sizes + + def parse(self, f): + # parse resident library names (AOS 1.x only) + while True: + l, s = self._read_name(f) + if l < 0: + raise HunkParseError("Error parsing HUNK_HEADER names") + elif l == 0: + break + self.reslib_names.append(s) + + # table size and hunk range + self.table_size = self._read_long(f) + self.first_hunk = self._read_long(f) + self.last_hunk = self._read_long(f) + if self.table_size < 0 or self.first_hunk < 0 or self.last_hunk < 0: + raise HunkParseError( + "HUNK_HEADER invalid table_size or first_hunk or last_hunk" + ) + + # determine number of hunks in size table + num_hunks = self.last_hunk - self.first_hunk + 1 + for a in range(num_hunks): + hunk_size = self._read_long(f) + if hunk_size < 0: + raise HunkParseError("HUNK_HEADER contains invalid hunk_size") + # note that the upper bits are the target memory type. We only have FAST, + # so let's forget about them for a moment. 
+ self.hunk_table.append(hunk_size & 0x3FFFFFFF) + + def write(self, f): + # write residents + for reslib in self.reslib_names: + self._write_name(f, reslib) + self._write_long(f, 0) + # table size and hunk range + self._write_long(f, self.table_size) + self._write_long(f, self.first_hunk) + self._write_long(f, self.last_hunk) + # sizes + for hunk_size in self.hunk_table: + self._write_long(f, hunk_size) class HunkSegmentBlock(HunkBlock): - """HUNK_CODE, HUNK_DATA, HUNK_BSS""" - def __init__(self, blk_id=None, data=None, size_longs=0): - if blk_id is not None: - self.blk_id = blk_id - self.data = data - self.size_longs = size_longs + """HUNK_CODE, HUNK_DATA, HUNK_BSS""" + + def __init__(self, blk_id=None, data=None, size_longs=0): + if blk_id is not None: + self.blk_id = blk_id + self.data = data + self.size_longs = size_longs - def parse(self, f): - size = self._read_long(f) - self.size_longs = size - if self.blk_id != HUNK_BSS: - size *= 4 - self.data = f.read(size) + def parse(self, f): + size = self._read_long(f) + self.size_longs = size + if self.blk_id != HUNK_BSS: + size *= 4 + self.data = f.read(size) - def write(self, f): - self._write_long(f, self.size_longs) - f.write(self.data) + def write(self, f): + self._write_long(f, self.size_longs) + f.write(self.data) class HunkRelocLongBlock(HunkBlock): - """HUNK_ABSRELOC32 - relocations stored in longs""" - def __init__(self, blk_id=None, relocs=None): - if blk_id is not None: - self.blk_id = blk_id - # map hunk number to list of relocations (i.e. 
byte offsets in long) - if relocs is None: - self.relocs = [] - else: - self.relocs = relocs - - def parse(self, f): - while True: - num = self._read_long(f) - if num == 0: - break - hunk_num = self._read_long(f) - offsets = [] - for i in range(num): - off = self._read_long(f) - offsets.append(off) - self.relocs.append((hunk_num, offsets)) - - def write(self,f): - for reloc in self.relocs: - hunk_num, offsets = reloc - self._write_long(f, len(offsets)) - self._write_long(f, hunk_num) - for off in offsets: - self._write_long(f, off) - self._write_long(f, 0) + """HUNK_ABSRELOC32 - relocations stored in longs""" + + def __init__(self, blk_id=None, relocs=None): + if blk_id is not None: + self.blk_id = blk_id + # map hunk number to list of relocations (i.e. byte offsets in long) + if relocs is None: + self.relocs = [] + else: + self.relocs = relocs + + def parse(self, f): + while True: + num = self._read_long(f) + if num == 0: + break + hunk_num = self._read_long(f) + offsets = [] + for i in range(num): + off = self._read_long(f) + offsets.append(off) + self.relocs.append((hunk_num, offsets)) + + def write(self, f): + for reloc in self.relocs: + hunk_num, offsets = reloc + self._write_long(f, len(offsets)) + self._write_long(f, hunk_num) + for off in offsets: + self._write_long(f, off) + self._write_long(f, 0) class HunkRelocWordBlock(HunkBlock): - """HUNK_RELOC32SHORT - relocations stored in words""" - def __init__(self, blk_id=None, relocs=None): - if blk_id is not None: - self.blk_id = blk_id - # list of tuples (hunk_no, [offsets]) - if relocs is None: - self.relocs = [] - else: - self.relocs = relocs - - def parse(self, f): - num_words = 0 - while True: - num_offs = self._read_word(f) - num_words += 1 - if num_offs == 0: - break - hunk_num = self._read_word(f) - num_words += num_offs + 1 - offsets = [] - for i in range(num_offs): - off = self._read_word(f) - offsets.append(off) - self.relocs.append((hunk_num, offsets)) - # pad to long - if num_words % 2 == 1: - 
self._read_word(f) - - def write(self, f): - num_words = 0 - for hunk_num, offsets in self.relocs: - num_offs = len(offsets) - self._write_word(f, num_offs) - self._write_word(f, hunk_num) - for i in range(num_offs): - self._write_word(f, offsets[i]) - num_words += 2 + num_offs - # end - self._write_word(f, 0) - num_words += 1 - # padding? - if num_words % 2 == 1: - self._write_word(f, 0) + """HUNK_RELOC32SHORT - relocations stored in words""" + + def __init__(self, blk_id=None, relocs=None): + if blk_id is not None: + self.blk_id = blk_id + # list of tuples (hunk_no, [offsets]) + if relocs is None: + self.relocs = [] + else: + self.relocs = relocs + + def parse(self, f): + num_words = 0 + while True: + num_offs = self._read_word(f) + num_words += 1 + if num_offs == 0: + break + hunk_num = self._read_word(f) + num_words += num_offs + 1 + offsets = [] + for i in range(num_offs): + off = self._read_word(f) + offsets.append(off) + self.relocs.append((hunk_num, offsets)) + # pad to long + if num_words % 2 == 1: + self._read_word(f) + + def write(self, f): + num_words = 0 + for hunk_num, offsets in self.relocs: + num_offs = len(offsets) + self._write_word(f, num_offs) + self._write_word(f, hunk_num) + for i in range(num_offs): + self._write_word(f, offsets[i]) + num_words += 2 + num_offs + # end + self._write_word(f, 0) + num_words += 1 + # padding? 
+ if num_words % 2 == 1: + self._write_word(f, 0) class HunkEndBlock(HunkBlock): - """HUNK_END""" - blk_id = HUNK_END + """HUNK_END""" + + blk_id = HUNK_END + + def parse(self, f): + pass - def parse(self, f): - pass - def write(self, f): - pass + def write(self, f): + pass class HunkOverlayBlock(HunkBlock): - """HUNK_OVERLAY""" - blk_id = HUNK_OVERLAY + """HUNK_OVERLAY""" - def __init__(self): - self.data = None + blk_id = HUNK_OVERLAY - def parse(self, f): - num_longs = self._read_long(f) - self.data = f.read(num_longs * 4) + def __init__(self): + self.data = None - def write(self, f): - self._write_long(f, int(self.data/4)) - f.write(self.data) + def parse(self, f): + num_longs = self._read_long(f) + self.data = f.read(num_longs * 4) + + def write(self, f): + self._write_long(f, int(self.data / 4)) + f.write(self.data) class HunkBreakBlock(HunkBlock): - """HUNK_BREAK""" - blk_id = HUNK_BREAK + """HUNK_BREAK""" + + blk_id = HUNK_BREAK - def parse(self, f): - pass - def write(self, f): - pass + def parse(self, f): + pass + + def write(self, f): + pass class HunkDebugBlock(HunkBlock): - """HUNK_DEBUG""" - blk_id = HUNK_DEBUG + """HUNK_DEBUG""" + + blk_id = HUNK_DEBUG - def __init__(self, debug_data=None): - self.debug_data = debug_data + def __init__(self, debug_data=None): + self.debug_data = debug_data - def parse(self, f): - num_longs = self._read_long(f) - num_bytes = num_longs * 4 - self.debug_data = f.read(num_bytes) + def parse(self, f): + num_longs = self._read_long(f) + num_bytes = num_longs * 4 + self.debug_data = f.read(num_bytes) - def write(self, f): - num_longs = int(len(self.debug_data)/4) - self._write_long(f, num_longs) - f.write(self.debug_data) + def write(self, f): + num_longs = int(len(self.debug_data) / 4) + self._write_long(f, num_longs) + f.write(self.debug_data) class HunkSymbolBlock(HunkBlock): - """HUNK_SYMBOL""" - blk_id = HUNK_SYMBOL - - def __init__(self, symbols=None): - if symbols is None: - self.symbols = [] - else: - self.symbols = 
symbols - - def parse(self, f): - while True: - s,n = self._read_name(f) - if s == 0: - break - off = self._read_long(f) - self.symbols.append((n, off)) - - def write(self, f): - for sym, off in self.symbols: - self._write_name(f, sym) - self._write_long(f, off) - self._write_long(f, 0) + """HUNK_SYMBOL""" + + blk_id = HUNK_SYMBOL + + def __init__(self, symbols=None): + if symbols is None: + self.symbols = [] + else: + self.symbols = symbols + + def parse(self, f): + while True: + s, n = self._read_name(f) + if s == 0: + break + off = self._read_long(f) + self.symbols.append((n, off)) + + def write(self, f): + for sym, off in self.symbols: + self._write_name(f, sym) + self._write_long(f, off) + self._write_long(f, 0) class HunkUnitBlock(HunkBlock): - """HUNK_UNIT""" - blk_id = HUNK_UNIT + """HUNK_UNIT""" - def __init__(self): - self.name = None + blk_id = HUNK_UNIT - def parse(self, f): - _,self.name = self._read_name(f) + def __init__(self): + self.name = None - def write(self, f): - self._write_name(f, self.name) + def parse(self, f): + _, self.name = self._read_name(f) + + def write(self, f): + self._write_name(f, self.name) class HunkNameBlock(HunkBlock): - """HUNK_NAME""" - blk_id = HUNK_NAME + """HUNK_NAME""" + + blk_id = HUNK_NAME - def __init__(self): - self.name = None + def __init__(self): + self.name = None - def parse(self, f): - _,self.name = self._read_name(f) + def parse(self, f): + _, self.name = self._read_name(f) - def write(self, f): - self._write_name(f, self.name) + def write(self, f): + self._write_name(f, self.name) class HunkExtEntry: - """helper class for HUNK_EXT entries""" - def __init__(self, name, ext_type, value, bss_size, offsets): - self.name = name - self.ext_type = ext_type - self.def_value = value # defs only - self.bss_size = bss_size # ABSCOMMON only - self.ref_offsets = offsets # refs only: list of offsets + """helper class for HUNK_EXT entries""" + + def __init__(self, name, ext_type, value, bss_size, offsets): + self.name = 
name + self.ext_type = ext_type + self.def_value = value # defs only + self.bss_size = bss_size # ABSCOMMON only + self.ref_offsets = offsets # refs only: list of offsets class HunkExtBlock(HunkBlock): - """HUNK_EXT""" - blk_id = HUNK_EXT - - def __init__(self): - self.entries = [] - - def parse(self, f): - while True: - tag = self._read_long(f) - if tag == 0: - break - ext_type = tag >> 24 - name_len = tag & 0xffffff - _,name = self._read_name_size(f, name_len) - # add on for type - bss_size = None - offsets = None - value = None - # ABSCOMMON -> bss size - if ext_type == EXT_ABSCOMMON: - bss_size = self._read_long(f) - # is a reference - elif ext_type >= 0x80: - num_refs = self._read_long(f) - offsets = [] - for i in range(num_refs): - off = self._read_long(f) - offsets.append(off) - # is a definition - else: - value = self._read_long(f) - e = HunkExtEntry(name, ext_type, value, bss_size, offsets) - self.entries.append(e) - - def write(self, f): - for entry in self.entries: - ext_type = entry.ext_type - self._write_name(f, entry.name, tag=ext_type) - # ABSCOMMON - if ext_type == EXT_ABSCOMMON: - self._write_long(f, entry.bss_size) - # is a reference - elif ext_type >= 0x80: - num_offsets = len(entry.ref_offsets) - self._write_long(f, num_offsets) - for off in entry.ref_offsets: - self._write_long(f, off) - # is a definition - else: - self._write_long(f, entry.def_value) - self._write_long(f,0) + """HUNK_EXT""" + + blk_id = HUNK_EXT + + def __init__(self): + self.entries = [] + + def parse(self, f): + while True: + tag = self._read_long(f) + if tag == 0: + break + ext_type = tag >> 24 + name_len = tag & 0xFFFFFF + _, name = self._read_name_size(f, name_len) + # add on for type + bss_size = None + offsets = None + value = None + # ABSCOMMON -> bss size + if ext_type == EXT_ABSCOMMON: + bss_size = self._read_long(f) + # is a reference + elif ext_type >= 0x80: + num_refs = self._read_long(f) + offsets = [] + for i in range(num_refs): + off = self._read_long(f) + 
offsets.append(off) + # is a definition + else: + value = self._read_long(f) + e = HunkExtEntry(name, ext_type, value, bss_size, offsets) + self.entries.append(e) + + def write(self, f): + for entry in self.entries: + ext_type = entry.ext_type + self._write_name(f, entry.name, tag=ext_type) + # ABSCOMMON + if ext_type == EXT_ABSCOMMON: + self._write_long(f, entry.bss_size) + # is a reference + elif ext_type >= 0x80: + num_offsets = len(entry.ref_offsets) + self._write_long(f, num_offsets) + for off in entry.ref_offsets: + self._write_long(f, off) + # is a definition + else: + self._write_long(f, entry.def_value) + self._write_long(f, 0) class HunkLibBlock(HunkBlock): - """HUNK_LIB""" - blk_id = HUNK_LIB - - def __init__(self): - self.blocks = [] - self.offsets = [] - - def parse(self, f, isLoadSeg=False): - num_longs = self._read_long(f) - pos = f.tell() - end_pos = pos + num_longs * 4 - # first read block id - while pos < end_pos: - tag = f.read(4) - # EOF - if len(tag) == 0: - break - elif len(tag) != 4: - raise HunkParseError("Hunk block tag too short!") - blk_id = struct.unpack(">I",tag)[0] - # mask out mem flags - blk_id = blk_id & HUNK_TYPE_MASK - # look up block type - if blk_id in hunk_block_type_map: - blk_type = hunk_block_type_map[blk_id] - # create block and parse - block = blk_type() - block.blk_id = blk_id - block.parse(f) - self.offsets.append(pos) - self.blocks.append(block) - else: - raise HunkParseError("Unsupported hunk type: %04d" % blk_id) - pos = f.tell() - - def write(self, f): - # write dummy length (fill in later) - pos = f.tell() - start = pos - self._write_long(f, 0) - self.offsets = [] - # write blocks - for block in self.blocks: - block_id = block.blk_id - block_id_raw = struct.pack(">I",block_id) - f.write(block_id_raw) - # write block itself - block.write(f) - # update offsets - self.offsets.append(pos) - pos = f.tell() - # fill in size - end = f.tell() - size = end - start - 4 - num_longs = size // 4 - f.seek(start, 0) - 
self._write_long(f, num_longs) - f.seek(end, 0) + """HUNK_LIB""" + + blk_id = HUNK_LIB + + def __init__(self): + self.blocks = [] + self.offsets = [] + + def parse(self, f, isLoadSeg=False): + num_longs = self._read_long(f) + pos = f.tell() + end_pos = pos + num_longs * 4 + # first read block id + while pos < end_pos: + tag = f.read(4) + # EOF + if len(tag) == 0: + break + elif len(tag) != 4: + raise HunkParseError("Hunk block tag too short!") + blk_id = struct.unpack(">I", tag)[0] + # mask out mem flags + blk_id = blk_id & HUNK_TYPE_MASK + # look up block type + if blk_id in hunk_block_type_map: + blk_type = hunk_block_type_map[blk_id] + # create block and parse + block = blk_type() + block.blk_id = blk_id + block.parse(f) + self.offsets.append(pos) + self.blocks.append(block) + else: + raise HunkParseError("Unsupported hunk type: %04d" % blk_id) + pos = f.tell() + + def write(self, f): + # write dummy length (fill in later) + pos = f.tell() + start = pos + self._write_long(f, 0) + self.offsets = [] + # write blocks + for block in self.blocks: + block_id = block.blk_id + block_id_raw = struct.pack(">I", block_id) + f.write(block_id_raw) + # write block itself + block.write(f) + # update offsets + self.offsets.append(pos) + pos = f.tell() + # fill in size + end = f.tell() + size = end - start - 4 + num_longs = size // 4 + f.seek(start, 0) + self._write_long(f, num_longs) + f.seek(end, 0) class HunkIndexUnitEntry: - def __init__(self, name_off, first_hunk_long_off): - self.name_off = name_off - self.first_hunk_long_off = first_hunk_long_off - self.index_hunks = [] + def __init__(self, name_off, first_hunk_long_off): + self.name_off = name_off + self.first_hunk_long_off = first_hunk_long_off + self.index_hunks = [] class HunkIndexHunkEntry: - def __init__(self, name_off, hunk_longs, hunk_ctype): - self.name_off = name_off - self.hunk_longs = hunk_longs - self.hunk_ctype = hunk_ctype - self.sym_refs = [] - self.sym_defs = [] + def __init__(self, name_off, hunk_longs, 
hunk_ctype): + self.name_off = name_off + self.hunk_longs = hunk_longs + self.hunk_ctype = hunk_ctype + self.sym_refs = [] + self.sym_defs = [] class HunkIndexSymbolRef: - def __init__(self, name_off): - self.name_off = name_off + def __init__(self, name_off): + self.name_off = name_off class HunkIndexSymbolDef: - def __init__(self, name_off, value, sym_ctype): - self.name_off = name_off - self.value = value - self.sym_ctype = sym_ctype + def __init__(self, name_off, value, sym_ctype): + self.name_off = name_off + self.value = value + self.sym_ctype = sym_ctype class HunkIndexBlock(HunkBlock): - """HUNK_INDEX""" - blk_id = HUNK_INDEX - - def __init__(self): - self.strtab = None - self.units = [] - - def parse(self, f): - num_longs = self._read_long(f) - num_words = num_longs * 2 - # string table size - strtab_size = self._read_word(f) - self.strtab = f.read(strtab_size) - num_words = num_words - (strtab_size // 2) - 1 - # read index unit blocks - while num_words > 1: - # unit description - name_off = self._read_word(f) - first_hunk_long_off = self._read_word(f) - num_hunks = self._read_word(f) - num_words -= 3 - unit_entry = HunkIndexUnitEntry(name_off, first_hunk_long_off) - self.units.append(unit_entry) - for i in range(num_hunks): - # hunk description - name_off = self._read_word(f) - hunk_longs = self._read_word(f) - hunk_ctype = self._read_word(f) - hunk_entry = HunkIndexHunkEntry(name_off, hunk_longs, hunk_ctype) - unit_entry.index_hunks.append(hunk_entry) - # refs - num_refs = self._read_word(f) - for j in range(num_refs): - name_off = self._read_word(f) - hunk_entry.sym_refs.append(HunkIndexSymbolRef(name_off)) - # defs - num_defs = self._read_word(f) - for j in range(num_defs): - name_off = self._read_word(f) - value = self._read_word(f) - stype = self._read_word(f) - hunk_entry.sym_defs.append(HunkIndexSymbolDef(name_off, value, stype)) - # calc word size - num_words = num_words - (5 + num_refs + num_defs * 3) - # alignment word? 
- if num_words == 1: - self._read_word(f) - - def write(self, f): - # write dummy size - num_longs_pos = f.tell() - self._write_long(f, 0) - num_words = 0 - # write string table - size_strtab = len(self.strtab) - self._write_word(f, size_strtab) - f.write(self.strtab) - num_words += size_strtab // 2 + 1 - # write unit blocks - for unit in self.units: - self._write_word(f, unit.name_off) - self._write_word(f, unit.first_hunk_long_off) - self._write_word(f, len(unit.index_hunks)) - num_words += 3 - for index in unit.index_hunks: - self._write_word(f, index.name_off) - self._write_word(f, index.hunk_longs) - self._write_word(f, index.hunk_ctype) - # refs - num_refs = len(index.sym_refs) - self._write_word(f, num_refs) - for sym_ref in index.sym_refs: - self._write_word(f, sym_ref.name_off) - # defs - num_defs = len(index.sym_defs) - self._write_word(f, num_defs) - for sym_def in index.sym_defs: - self._write_word(f, sym_def.name_off) - self._write_word(f, sym_def.value) - self._write_word(f, sym_def.sym_ctype) - # count words - num_words += 5 + num_refs + num_defs * 3 - # alignment word? 
- if num_words % 2 == 1: - num_words += 1 - self._write_word(f, 0) - # fill in real size - pos = f.tell() - f.seek(num_longs_pos, 0) - self._write_long(f, num_words/2) - f.seek(pos, 0) + """HUNK_INDEX""" + + blk_id = HUNK_INDEX + + def __init__(self): + self.strtab = None + self.units = [] + + def parse(self, f): + num_longs = self._read_long(f) + num_words = num_longs * 2 + # string table size + strtab_size = self._read_word(f) + self.strtab = f.read(strtab_size) + num_words = num_words - (strtab_size // 2) - 1 + # read index unit blocks + while num_words > 1: + # unit description + name_off = self._read_word(f) + first_hunk_long_off = self._read_word(f) + num_hunks = self._read_word(f) + num_words -= 3 + unit_entry = HunkIndexUnitEntry(name_off, first_hunk_long_off) + self.units.append(unit_entry) + for i in range(num_hunks): + # hunk description + name_off = self._read_word(f) + hunk_longs = self._read_word(f) + hunk_ctype = self._read_word(f) + hunk_entry = HunkIndexHunkEntry(name_off, hunk_longs, hunk_ctype) + unit_entry.index_hunks.append(hunk_entry) + # refs + num_refs = self._read_word(f) + for j in range(num_refs): + name_off = self._read_word(f) + hunk_entry.sym_refs.append(HunkIndexSymbolRef(name_off)) + # defs + num_defs = self._read_word(f) + for j in range(num_defs): + name_off = self._read_word(f) + value = self._read_word(f) + stype = self._read_word(f) + hunk_entry.sym_defs.append( + HunkIndexSymbolDef(name_off, value, stype) + ) + # calc word size + num_words = num_words - (5 + num_refs + num_defs * 3) + # alignment word? 
+ if num_words == 1: + self._read_word(f) + + def write(self, f): + # write dummy size + num_longs_pos = f.tell() + self._write_long(f, 0) + num_words = 0 + # write string table + size_strtab = len(self.strtab) + self._write_word(f, size_strtab) + f.write(self.strtab) + num_words += size_strtab // 2 + 1 + # write unit blocks + for unit in self.units: + self._write_word(f, unit.name_off) + self._write_word(f, unit.first_hunk_long_off) + self._write_word(f, len(unit.index_hunks)) + num_words += 3 + for index in unit.index_hunks: + self._write_word(f, index.name_off) + self._write_word(f, index.hunk_longs) + self._write_word(f, index.hunk_ctype) + # refs + num_refs = len(index.sym_refs) + self._write_word(f, num_refs) + for sym_ref in index.sym_refs: + self._write_word(f, sym_ref.name_off) + # defs + num_defs = len(index.sym_defs) + self._write_word(f, num_defs) + for sym_def in index.sym_defs: + self._write_word(f, sym_def.name_off) + self._write_word(f, sym_def.value) + self._write_word(f, sym_def.sym_ctype) + # count words + num_words += 5 + num_refs + num_defs * 3 + # alignment word? 
+ if num_words % 2 == 1: + num_words += 1 + self._write_word(f, 0) + # fill in real size + pos = f.tell() + f.seek(num_longs_pos, 0) + self._write_long(f, num_words / 2) + f.seek(pos, 0) # map the hunk types to the block classes hunk_block_type_map = { - # Load Module - HUNK_HEADER : HunkHeaderBlock, - HUNK_CODE : HunkSegmentBlock, - HUNK_DATA : HunkSegmentBlock, - HUNK_BSS : HunkSegmentBlock, - HUNK_ABSRELOC32 : HunkRelocLongBlock, - HUNK_RELOC32SHORT : HunkRelocWordBlock, - HUNK_END : HunkEndBlock, - HUNK_DEBUG : HunkDebugBlock, - HUNK_SYMBOL : HunkSymbolBlock, - # Overlays - HUNK_OVERLAY : HunkOverlayBlock, - HUNK_BREAK : HunkBreakBlock, - # Object Module - HUNK_UNIT : HunkUnitBlock, - HUNK_NAME : HunkNameBlock, - HUNK_RELRELOC16 : HunkRelocLongBlock, - HUNK_RELRELOC8 : HunkRelocLongBlock, - HUNK_DREL32 : HunkRelocLongBlock, - HUNK_DREL16 : HunkRelocLongBlock, - HUNK_DREL8 : HunkRelocLongBlock, - HUNK_EXT : HunkExtBlock, - # New Library - HUNK_LIB : HunkLibBlock, - HUNK_INDEX : HunkIndexBlock + # Load Module + HUNK_HEADER: HunkHeaderBlock, + HUNK_CODE: HunkSegmentBlock, + HUNK_DATA: HunkSegmentBlock, + HUNK_BSS: HunkSegmentBlock, + HUNK_ABSRELOC32: HunkRelocLongBlock, + HUNK_RELOC32SHORT: HunkRelocWordBlock, + HUNK_END: HunkEndBlock, + HUNK_DEBUG: HunkDebugBlock, + HUNK_SYMBOL: HunkSymbolBlock, + # Overlays + HUNK_OVERLAY: HunkOverlayBlock, + HUNK_BREAK: HunkBreakBlock, + # Object Module + HUNK_UNIT: HunkUnitBlock, + HUNK_NAME: HunkNameBlock, + HUNK_RELRELOC16: HunkRelocLongBlock, + HUNK_RELRELOC8: HunkRelocLongBlock, + HUNK_DREL32: HunkRelocLongBlock, + HUNK_DREL16: HunkRelocLongBlock, + HUNK_DREL8: HunkRelocLongBlock, + HUNK_EXT: HunkExtBlock, + # New Library + HUNK_LIB: HunkLibBlock, + HUNK_INDEX: HunkIndexBlock, } class HunkBlockFile: - """The HunkBlockFile holds the list of blocks found in a hunk file""" - def __init__(self, blocks=None): - if blocks is None: - self.blocks = [] - else: - self.blocks = blocks - - def get_blocks(self): - return self.blocks - 
- def set_blocks(self, blocks): - self.blocks = blocks - - def read_path(self, path_name, isLoadSeg=False, verbose=False): - f = open(path_name, "rb") - self.read(f, isLoadSeg, verbose) - f.close() - - def read(self, f, isLoadSeg=False, verbose=False): - """read a hunk file and fill block list""" - while True: - # first read block id - tag = f.read(4) - # EOF - if len(tag) == 0: - break - elif len(tag) != 4: - raise HunkParseError("Hunk block tag too short!") - blk_id = struct.unpack(">I",tag)[0] - # mask out mem flags - blk_id = blk_id & HUNK_TYPE_MASK - # look up block type - if blk_id in hunk_block_type_map: - # v37 special case: 1015 is 1020 (HUNK_RELOC32SHORT) - # we do this only in LoadSeg() files - if isLoadSeg and blk_id == 1015: - blk_id = 1020 - blk_type = hunk_block_type_map[blk_id] - # create block and parse - block = blk_type() - block.blk_id = blk_id - block.parse(f) - self.blocks.append(block) - else: - raise HunkParseError("Unsupported hunk type: %04d" % blk_id) - - def write_path(self, path_name): - f = open(path_name, "wb") - self.write(f) - f.close() - - def write(self, f, isLoadSeg=False): - """write a hunk file back to file object""" - for block in self.blocks: - # write block id - block_id = block.blk_id - # convert id - if isLoadSeg and block_id == 1020: - block_id = 1015 - block_id_raw = struct.pack(">I",block_id) - f.write(block_id_raw) - # write block itself - block.write(f) - - def detect_type(self): - """look at blocks and try to deduce the type of hunk file""" - if len(self.blocks) == 0: - return TYPE_UNKNOWN - first_block = self.blocks[0] - blk_id = first_block.blk_id - return self._map_blkid_to_type(blk_id) - - def peek_type(self, f): - """look into given file obj stream to determine file format. 
+ """The HunkBlockFile holds the list of blocks found in a hunk file""" + + def __init__(self, blocks=None): + if blocks is None: + self.blocks = [] + else: + self.blocks = blocks + + def get_blocks(self): + return self.blocks + + def set_blocks(self, blocks): + self.blocks = blocks + + def read_path(self, path_name, isLoadSeg=False, verbose=False): + f = open(path_name, "rb") + self.read(f, isLoadSeg, verbose) + f.close() + + def read(self, f, isLoadSeg=False, verbose=False): + """read a hunk file and fill block list""" + while True: + # first read block id + tag = f.read(4) + # EOF + if len(tag) == 0: + break + elif len(tag) != 4: + raise HunkParseError("Hunk block tag too short!") + blk_id = struct.unpack(">I", tag)[0] + # mask out mem flags + blk_id = blk_id & HUNK_TYPE_MASK + # look up block type + if blk_id in hunk_block_type_map: + # v37 special case: 1015 is 1020 (HUNK_RELOC32SHORT) + # we do this only in LoadSeg() files + if isLoadSeg and blk_id == 1015: + blk_id = 1020 + blk_type = hunk_block_type_map[blk_id] + # create block and parse + block = blk_type() + block.blk_id = blk_id + block.parse(f) + self.blocks.append(block) + else: + raise HunkParseError("Unsupported hunk type: %04d" % blk_id) + + def write_path(self, path_name): + f = open(path_name, "wb") + self.write(f) + f.close() + + def write(self, f, isLoadSeg=False): + """write a hunk file back to file object""" + for block in self.blocks: + # write block id + block_id = block.blk_id + # convert id + if isLoadSeg and block_id == 1020: + block_id = 1015 + block_id_raw = struct.pack(">I", block_id) + f.write(block_id_raw) + # write block itself + block.write(f) + + def detect_type(self): + """look at blocks and try to deduce the type of hunk file""" + if len(self.blocks) == 0: + return TYPE_UNKNOWN + first_block = self.blocks[0] + blk_id = first_block.blk_id + return self._map_blkid_to_type(blk_id) + + def peek_type(self, f): + """look into given file obj stream to determine file format. 
stream is read and later on seek'ed back.""" - pos = f.tell() - tag = f.read(4) - # EOF - if len(tag) == 0: - return TYPE_UNKNOWN - elif len(tag) != 4: - f.seek(pos,0) - return TYPE_UNKNOWN - else: - blk_id = struct.unpack(">I",tag)[0] - f.seek(pos,0) - return self._map_blkid_to_type(blk_id) - - def _map_blkid_to_type(self, blk_id): - if blk_id == HUNK_HEADER: - return TYPE_LOADSEG - elif blk_id == HUNK_UNIT: - return TYPE_UNIT - elif blk_id == HUNK_LIB: - return TYPE_LIB - else: - return TYPE_UNKNOWN - - def get_block_type_names(self): - """return a string array with the names of all block types""" - res = [] - for blk in self.blocks: - blk_id = blk.blk_id - name = hunk_names[blk_id] - res.append(name) - return res + pos = f.tell() + tag = f.read(4) + # EOF + if len(tag) == 0: + return TYPE_UNKNOWN + elif len(tag) != 4: + f.seek(pos, 0) + return TYPE_UNKNOWN + else: + blk_id = struct.unpack(">I", tag)[0] + f.seek(pos, 0) + return self._map_blkid_to_type(blk_id) + + def _map_blkid_to_type(self, blk_id): + if blk_id == HUNK_HEADER: + return TYPE_LOADSEG + elif blk_id == HUNK_UNIT: + return TYPE_UNIT + elif blk_id == HUNK_LIB: + return TYPE_LIB + else: + return TYPE_UNKNOWN + + def get_block_type_names(self): + """return a string array with the names of all block types""" + res = [] + for blk in self.blocks: + blk_id = blk.blk_id + name = hunk_names[blk_id] + res.append(name) + return res # mini test -if __name__ == '__main__': - import sys - import io - for a in sys.argv[1:]: - # read data - f = open(a, "rb") - data = f.read() - f.close() - # parse from string stream - fobj = io.StringIO(data) - hbf = HunkBlockFile() - hbf.read(fobj, True) - fobj.close() - print(hbf.blocks) - # write to new string stream - nobj = io.StringIO() - hbf.write(nobj, True) - new_data = nobj.getvalue() - nobj.close() - # dump debug data - f = open("debug.hunk", "wb") - f.write(new_data) - f.close() - # compare read and written stream - if len(data) != len(new_data): - print("MISMATCH", 
len(data), len(new_data)) - else: - for i in range(len(data)): - if data[i] != new_data[i]: - print("MISMATCH @%x" % i) - print("OK") - # detect type of file - t = hbf.detect_type() - print("type=", t, type_names[t]) +if __name__ == "__main__": + import sys + import io + + for a in sys.argv[1:]: + # read data + f = open(a, "rb") + data = f.read() + f.close() + # parse from string stream + fobj = io.StringIO(data) + hbf = HunkBlockFile() + hbf.read(fobj, True) + fobj.close() + print(hbf.blocks) + # write to new string stream + nobj = io.StringIO() + hbf.write(nobj, True) + new_data = nobj.getvalue() + nobj.close() + # dump debug data + f = open("debug.hunk", "wb") + f.write(new_data) + f.close() + # compare read and written stream + if len(data) != len(new_data): + print("MISMATCH", len(data), len(new_data)) + else: + for i in range(len(data)): + if data[i] != new_data[i]: + print("MISMATCH @%x" % i) + print("OK") + # detect type of file + t = hbf.detect_type() + print("type=", t, type_names[t]) diff --git a/amitools/binfmt/hunk/HunkDebug.py b/amitools/binfmt/hunk/HunkDebug.py index 1f52d479..852fd4bb 100644 --- a/amitools/binfmt/hunk/HunkDebug.py +++ b/amitools/binfmt/hunk/HunkDebug.py @@ -1,156 +1,157 @@ - import struct import io class HunkDebugLineEntry: - def __init__(self, offset, src_line): - self.offset = offset - self.src_line = src_line + def __init__(self, offset, src_line): + self.offset = offset + self.src_line = src_line - def __str__(self): - return "[+%08x: %d]" % (self.offset, self.src_line) + def __str__(self): + return "[+%08x: %d]" % (self.offset, self.src_line) - def get_offset(self): - return self.offset + def get_offset(self): + return self.offset - def get_src_line(self): - return self.src_line + def get_src_line(self): + return self.src_line class HunkDebugLine: - """structure to hold source line info""" - def __init__(self, src_file, base_offset): - self.tag = 'LINE' - self.src_file = src_file - self.base_offset = base_offset - self.entries 
= [] + """structure to hold source line info""" + + def __init__(self, src_file, base_offset): + self.tag = "LINE" + self.src_file = src_file + self.base_offset = base_offset + self.entries = [] - def add_entry(self, offset, src_line): - self.entries.append(HunkDebugLineEntry(offset, src_line)) + def add_entry(self, offset, src_line): + self.entries.append(HunkDebugLineEntry(offset, src_line)) - def __str__(self): - prefix = "{%s,%s,@%08x:" % (self.tag, self.src_file, self.base_offset) - return prefix + ",".join(map(str,self.entries)) + "}" + def __str__(self): + prefix = "{%s,%s,@%08x:" % (self.tag, self.src_file, self.base_offset) + return prefix + ",".join(map(str, self.entries)) + "}" - def get_src_file(self): - return self.src_file + def get_src_file(self): + return self.src_file - def get_base_offset(self): - return self.base_offset + def get_base_offset(self): + return self.base_offset - def get_entries(self): - return self.entries + def get_entries(self): + return self.entries class HunkDebugAny: - def __init__(self, tag, data, base_offset): - self.tag = tag - self.data = data - self.base_offset = base_offset + def __init__(self, tag, data, base_offset): + self.tag = tag + self.data = data + self.base_offset = base_offset - def __str__(self): - return "{%s,%d,%s}" % (self.tag, self.base_offset, self.data) + def __str__(self): + return "{%s,%d,%s}" % (self.tag, self.base_offset, self.data) class HunkDebug: - def encode(self, debug_info): - """encode a debug info and return a debug_data chunk""" - out = io.StringIO() - # +0: base offset - self._write_long(out, debug_info.base_offset) - # +4: type tag - tag = debug_info.tag - out.write(tag) - if tag == 'LINE': - # file name - self._write_string(out, debug_info.src_file) - # entries - for e in debug_info.entries: - self._write_long(out, e.src_line) - self._write_long(out, e.offset) - elif tag == 'HEAD': - out.write("DBGV01\0\0") - out.write(debug_info.data) - else: # any - out.write(debug_info.data) - # 
retrieve result - res = out.getvalue() - out.close() - return res - - def decode(self, debug_data): - """decode a data block from a debug hunk""" - if len(debug_data) < 12: - return None - # +0: base_offset for file - base_offset = self._read_long(debug_data, 0) - # +4: tag - tag = debug_data[4:8] - if tag == 'LINE': # SAS/C source line info - # +8: string file name - src_file, src_size = self._read_string(debug_data, 8) - dl = HunkDebugLine(src_file, base_offset) - off = 12 + src_size - num = (len(debug_data) - off) // 8 - for i in range(num): - src_line = self._read_long(debug_data, off) - offset = self._read_long(debug_data, off+4) - off += 8 - dl.add_entry(offset, src_line) - return dl - elif tag == 'HEAD': - tag2 = debug_data[8:16] - assert tag2 == "DBGV01\0\0" - data = debug_data[16:] - return HunkDebugAny(tag, data, base_offset) - else: - data = debug_data[8:] - return HunkDebugAny(tag, data, base_offset) - - def _read_string(self, buf, pos): - size = self._read_long(buf,pos) * 4 - off = pos + 4 - data = buf[off:off+size] - pos = data.find('\0') - if pos == 0: - return "", size - elif pos != -1: - return data[:pos], size - else: - return data, size - - def _write_string(self, f, s): - n = len(s) - num_longs = int((n + 3)/4) - self._write_long(f, num_longs) - add = num_longs * 4 - n - if add > 0: - s += '\0' * add - f.write(s) - - def _read_long(self, buf, pos): - return struct.unpack_from(">I",buf,pos)[0] - - def _write_long(self, f, v): - data = struct.pack(">I",v) - f.write(data) + def encode(self, debug_info): + """encode a debug info and return a debug_data chunk""" + out = io.StringIO() + # +0: base offset + self._write_long(out, debug_info.base_offset) + # +4: type tag + tag = debug_info.tag + out.write(tag) + if tag == "LINE": + # file name + self._write_string(out, debug_info.src_file) + # entries + for e in debug_info.entries: + self._write_long(out, e.src_line) + self._write_long(out, e.offset) + elif tag == "HEAD": + out.write("DBGV01\0\0") + 
out.write(debug_info.data) + else: # any + out.write(debug_info.data) + # retrieve result + res = out.getvalue() + out.close() + return res + + def decode(self, debug_data): + """decode a data block from a debug hunk""" + if len(debug_data) < 12: + return None + # +0: base_offset for file + base_offset = self._read_long(debug_data, 0) + # +4: tag + tag = debug_data[4:8] + if tag == "LINE": # SAS/C source line info + # +8: string file name + src_file, src_size = self._read_string(debug_data, 8) + dl = HunkDebugLine(src_file, base_offset) + off = 12 + src_size + num = (len(debug_data) - off) // 8 + for i in range(num): + src_line = self._read_long(debug_data, off) + offset = self._read_long(debug_data, off + 4) + off += 8 + dl.add_entry(offset, src_line) + return dl + elif tag == "HEAD": + tag2 = debug_data[8:16] + assert tag2 == "DBGV01\0\0" + data = debug_data[16:] + return HunkDebugAny(tag, data, base_offset) + else: + data = debug_data[8:] + return HunkDebugAny(tag, data, base_offset) + + def _read_string(self, buf, pos): + size = self._read_long(buf, pos) * 4 + off = pos + 4 + data = buf[off : off + size] + pos = data.find("\0") + if pos == 0: + return "", size + elif pos != -1: + return data[:pos], size + else: + return data, size + + def _write_string(self, f, s): + n = len(s) + num_longs = int((n + 3) / 4) + self._write_long(f, num_longs) + add = num_longs * 4 - n + if add > 0: + s += "\0" * add + f.write(s) + + def _read_long(self, buf, pos): + return struct.unpack_from(">I", buf, pos)[0] + + def _write_long(self, f, v): + data = struct.pack(">I", v) + f.write(data) # ----- mini test ----- -if __name__ == '__main__': - import sys - from .HunkBlockFile import HunkBlockFile, HunkDebugBlock - hd = HunkDebug() - for a in sys.argv[1:]: - hbf = HunkBlockFile() - hbf.read_path(a) - for blk in hbf.get_blocks(): - if isinstance(blk, HunkDebugBlock): - # decode debug data - dd = hd.decode(blk.debug_data) - print(a,"->",dd.tag) - # now encode again - new_debug_data = 
hd.encode(dd) - # compare! - assert new_debug_data == blk.debug_data +if __name__ == "__main__": + import sys + from .HunkBlockFile import HunkBlockFile, HunkDebugBlock + + hd = HunkDebug() + for a in sys.argv[1:]: + hbf = HunkBlockFile() + hbf.read_path(a) + for blk in hbf.get_blocks(): + if isinstance(blk, HunkDebugBlock): + # decode debug data + dd = hd.decode(blk.debug_data) + print(a, "->", dd.tag) + # now encode again + new_debug_data = hd.encode(dd) + # compare! + assert new_debug_data == blk.debug_data diff --git a/amitools/binfmt/hunk/HunkDisassembler.py b/amitools/binfmt/hunk/HunkDisassembler.py index 5414908a..f87ad100 100644 --- a/amitools/binfmt/hunk/HunkDisassembler.py +++ b/amitools/binfmt/hunk/HunkDisassembler.py @@ -1,209 +1,218 @@ from amitools.vamos.machine import DisAsm from . import Hunk + class HunkDisassembler: - - def __init__(self, cpu='68000'): - self.disasm = DisAsm.create(cpu) - - def get_symtab(self, hunk): - for h in hunk[1:]: - if h['type'] == Hunk.HUNK_SYMBOL: - return h['symbols'] - return None - - def find_symbol(self, hunk, offset, always): - symtab = self.get_symtab(hunk) - if symtab == None: - if always: - return "%08x" % offset - else: + def __init__(self, cpu="68000"): + self.disasm = DisAsm.create(cpu) + + def get_symtab(self, hunk): + for h in hunk[1:]: + if h["type"] == Hunk.HUNK_SYMBOL: + return h["symbols"] return None - symmap = {} - for s in symtab: - symmap[s[1]] = s[0] - - offs = sorted(symmap.keys()); - last = None - last_offset = 0 - for o in offs: - if o == offset: - return symmap[o] - - if always: - if o < offset: - # approximate to last symbol - if last != None: - return last + " + %08x" % (o - last_offset) - else: + def find_symbol(self, hunk, offset, always): + symtab = self.get_symtab(hunk) + if symtab == None: + if always: + return "%08x" % offset + else: + return None + + symmap = {} + for s in symtab: + symmap[s[1]] = s[0] + + offs = sorted(symmap.keys()) + last = None + last_offset = 0 + for o in offs: + 
if o == offset: + return symmap[o] + + if always: + if o < offset: + # approximate to last symbol + if last != None: + return last + " + %08x" % (o - last_offset) + else: + return "%08x" % offset + last = symmap[o] + last_offset = o + + if always: return "%08x" % offset - last = symmap[o] - last_offset = o - - if always: - return "%08x" % offset - else: - return None - - def find_src_line(self, hunk, addr): - for h in hunk[1:]: - if h['type'] == Hunk.HUNK_DEBUG and h['debug_type'] == 'LINE': - src_map = h['src_map'] - for e in src_map: - src_line = e[0] - src_addr = e[1] + h['debug_offset'] - if src_addr == addr: - return (h['src_file'],src_line) - return None - - # map reloc type to number of words to be relocated - map_reloc_to_num_words = { - Hunk.HUNK_ABSRELOC32 : 2, - Hunk.HUNK_DREL16 : 1, - Hunk.HUNK_DREL32 : 2 - } - - # find_reloc - # return - # 0 - rel_offset to addr reloc begin (in words) - # 1 - size of reloc (in words) - # 2 - hunk number reloc references - # 3 - relative offset in hunk (in bytes) - # 4 - reloc hunk - def find_reloc(self, hunk, addr, word): - end_addr = addr + len(word) * 2 - for h in hunk[1:]: - valid = h['type'] in self.map_reloc_to_num_words - if valid: - num_words = self.map_reloc_to_num_words[h['type']] - reloc = h['reloc'] - for hunk_num in reloc: - offsets = reloc[hunk_num] - for off in offsets: - if off >= addr and off + num_words * 2 <= end_addr: - word_offset = (off - addr) // 2 # in words - - # calc offset - addr = 0 - for i in range(num_words): - addr = addr * 0x10000 + word[word_offset+i] - - reloc_type_name = h['type_name'].replace("HUNK_","").lower() - return (word_offset, num_words, hunk_num, addr, reloc_type_name) - return None - - map_ext_ref_to_num_words = { - Hunk.EXT_ABSREF32 : 2, - Hunk.EXT_RELREF16: 1, - Hunk.EXT_DEXT16: 1 - } - - # find_ext_ref - # return - # 0 - word offset to word begin (in words) - # 1 - size of reloc (in words) - # 2 - name of external symbol - # 3 - type name of ext ref - def 
find_ext_ref(self, hunk, addr, word): - end_addr = addr + len(word) * 2 - for h in hunk[1:]: - if h['type'] == Hunk.HUNK_EXT: - for ext in h['ext_ref']: - refs = ext['refs'] - valid = ext['type'] in self.map_ext_ref_to_num_words - if valid: - num_words = self.map_ext_ref_to_num_words[ext['type']] - for ref in refs: - if ref >= addr and ref < end_addr: - word_offset = (ref - addr) // 2 - type_name = ext['type_name'].replace("EXT_","").lower() - return (word_offset, num_words, ext['name'], type_name) - return None - - # search the HUNK_EXT for a defintion - def find_ext_def(self, hunk, addr): - for h in hunk[1:]: - if h['type'] == Hunk.HUNK_EXT: - for ext in h['ext_def']: - if addr == ext['def']: - return ext['name'] - return None - - # search the index of a lib for a definition - def find_index_def(self, hunk, addr): - main = hunk[0] - if 'index_hunk' in main: - info = main['index_hunk'] - if 'defs' in info: - for d in info['defs']: - if d['value'] == addr: - return d['name'] - return None - - def find_symbol_or_def(self, hunk, addr, always): - symbol = self.find_symbol(hunk, addr, False) - if symbol == None: - symbol = self.find_ext_def(hunk, addr) - if symbol == None: - symbol = self.find_index_def(hunk, addr) - if symbol == None and always: - return "%08x" % addr - return symbol - - # ----- show disassembly ----- - - def show_disassembly(self, hunk, seg_list, start): - main = hunk[0] - lines = self.disasm.disassemble_block(main['data'],start) - # show line by line - for l in lines: - addr = l[0] - word = l[1] - code = l[2] - - # try to find a symbol for this addr - symbol = self.find_symbol_or_def(hunk, addr, False) - - # create line info - info = [] - - # find source line info - line = self.find_src_line(hunk,addr) - if line != None: - (src_file, src_line) = line - info.append( "src: %s:%d" % (src_file,src_line)) - - # find an extref - ext_ref = self.find_ext_ref(hunk,addr,word) - if ext_ref != None: - ref_symbol = ext_ref[2] - ref_type = ext_ref[3] - 
info.append( "%s: %s" % (ref_type, ref_symbol) ) - - # find a relocation - reloc = self.find_reloc(hunk,addr,word) - if reloc != None: - hunk_num = reloc[2] - offset = reloc[3] - reloc_type_name = reloc[4] - # a self reference - reloc_symbol = self.find_symbol_or_def(seg_list[hunk_num],offset,True) - if hunk_num == main['hunk_no']: - src = "self" else: - src = "#%03d %s" % (hunk_num, seg_list[hunk_num][0]['type_name']) - info.append( "%s: %s: %s" % (reloc_type_name, src, reloc_symbol) ) - - # build comment from all infos - if len(info) > 0: - comment = "; " + ", ".join(info) - else: - comment = "" - - # create final line - if symbol != None: - print("\t\t\t\t%s:" % symbol) - print("%08x\t%-20s\t%-30s %s" % (addr," ".join(["%04x" %x for x in word]),code,comment)) - - \ No newline at end of file + return None + + def find_src_line(self, hunk, addr): + for h in hunk[1:]: + if h["type"] == Hunk.HUNK_DEBUG and h["debug_type"] == "LINE": + src_map = h["src_map"] + for e in src_map: + src_line = e[0] + src_addr = e[1] + h["debug_offset"] + if src_addr == addr: + return (h["src_file"], src_line) + return None + + # map reloc type to number of words to be relocated + map_reloc_to_num_words = { + Hunk.HUNK_ABSRELOC32: 2, + Hunk.HUNK_DREL16: 1, + Hunk.HUNK_DREL32: 2, + } + + # find_reloc + # return + # 0 - rel_offset to addr reloc begin (in words) + # 1 - size of reloc (in words) + # 2 - hunk number reloc references + # 3 - relative offset in hunk (in bytes) + # 4 - reloc hunk + def find_reloc(self, hunk, addr, word): + end_addr = addr + len(word) * 2 + for h in hunk[1:]: + valid = h["type"] in self.map_reloc_to_num_words + if valid: + num_words = self.map_reloc_to_num_words[h["type"]] + reloc = h["reloc"] + for hunk_num in reloc: + offsets = reloc[hunk_num] + for off in offsets: + if off >= addr and off + num_words * 2 <= end_addr: + word_offset = (off - addr) // 2 # in words + + # calc offset + addr = 0 + for i in range(num_words): + addr = addr * 0x10000 + word[word_offset 
+ i] + + reloc_type_name = ( + h["type_name"].replace("HUNK_", "").lower() + ) + return ( + word_offset, + num_words, + hunk_num, + addr, + reloc_type_name, + ) + return None + + map_ext_ref_to_num_words = { + Hunk.EXT_ABSREF32: 2, + Hunk.EXT_RELREF16: 1, + Hunk.EXT_DEXT16: 1, + } + + # find_ext_ref + # return + # 0 - word offset to word begin (in words) + # 1 - size of reloc (in words) + # 2 - name of external symbol + # 3 - type name of ext ref + def find_ext_ref(self, hunk, addr, word): + end_addr = addr + len(word) * 2 + for h in hunk[1:]: + if h["type"] == Hunk.HUNK_EXT: + for ext in h["ext_ref"]: + refs = ext["refs"] + valid = ext["type"] in self.map_ext_ref_to_num_words + if valid: + num_words = self.map_ext_ref_to_num_words[ext["type"]] + for ref in refs: + if ref >= addr and ref < end_addr: + word_offset = (ref - addr) // 2 + type_name = ext["type_name"].replace("EXT_", "").lower() + return (word_offset, num_words, ext["name"], type_name) + return None + + # search the HUNK_EXT for a defintion + def find_ext_def(self, hunk, addr): + for h in hunk[1:]: + if h["type"] == Hunk.HUNK_EXT: + for ext in h["ext_def"]: + if addr == ext["def"]: + return ext["name"] + return None + + # search the index of a lib for a definition + def find_index_def(self, hunk, addr): + main = hunk[0] + if "index_hunk" in main: + info = main["index_hunk"] + if "defs" in info: + for d in info["defs"]: + if d["value"] == addr: + return d["name"] + return None + + def find_symbol_or_def(self, hunk, addr, always): + symbol = self.find_symbol(hunk, addr, False) + if symbol == None: + symbol = self.find_ext_def(hunk, addr) + if symbol == None: + symbol = self.find_index_def(hunk, addr) + if symbol == None and always: + return "%08x" % addr + return symbol + + # ----- show disassembly ----- + + def show_disassembly(self, hunk, seg_list, start): + main = hunk[0] + lines = self.disasm.disassemble_block(main["data"], start) + # show line by line + for l in lines: + addr = l[0] + word = l[1] + 
code = l[2] + + # try to find a symbol for this addr + symbol = self.find_symbol_or_def(hunk, addr, False) + + # create line info + info = [] + + # find source line info + line = self.find_src_line(hunk, addr) + if line != None: + (src_file, src_line) = line + info.append("src: %s:%d" % (src_file, src_line)) + + # find an extref + ext_ref = self.find_ext_ref(hunk, addr, word) + if ext_ref != None: + ref_symbol = ext_ref[2] + ref_type = ext_ref[3] + info.append("%s: %s" % (ref_type, ref_symbol)) + + # find a relocation + reloc = self.find_reloc(hunk, addr, word) + if reloc != None: + hunk_num = reloc[2] + offset = reloc[3] + reloc_type_name = reloc[4] + # a self reference + reloc_symbol = self.find_symbol_or_def(seg_list[hunk_num], offset, True) + if hunk_num == main["hunk_no"]: + src = "self" + else: + src = "#%03d %s" % (hunk_num, seg_list[hunk_num][0]["type_name"]) + info.append("%s: %s: %s" % (reloc_type_name, src, reloc_symbol)) + + # build comment from all infos + if len(info) > 0: + comment = "; " + ", ".join(info) + else: + comment = "" + + # create final line + if symbol != None: + print("\t\t\t\t%s:" % symbol) + print( + "%08x\t%-20s\t%-30s %s" + % (addr, " ".join(["%04x" % x for x in word]), code, comment) + ) diff --git a/amitools/binfmt/hunk/HunkLoadSegFile.py b/amitools/binfmt/hunk/HunkLoadSegFile.py index f13f01c0..6ac7d7f2 100644 --- a/amitools/binfmt/hunk/HunkLoadSegFile.py +++ b/amitools/binfmt/hunk/HunkLoadSegFile.py @@ -1,275 +1,278 @@ - - - from .HunkBlockFile import * from .HunkDebug import HunkDebug class HunkSegment: - """holds a code, data, or bss hunk/segment""" - def __init__(self): - self.blocks = None - self.seg_blk = None - self.symbol_blk = None - self.reloc_blks = None - self.debug_blks = None - self.debug_infos = None + """holds a code, data, or bss hunk/segment""" - def __repr__(self): - return "[seg=%s,symbol=%s,reloc=%s,debug=%s,debug_info=%s]" % \ - (self._blk_str(self.seg_blk), - self._blk_str(self.symbol_blk), - 
self._blk_str_list(self.reloc_blks), - self._blk_str_list(self.debug_blks), - self._debug_infos_str()) + def __init__(self): + self.blocks = None + self.seg_blk = None + self.symbol_blk = None + self.reloc_blks = None + self.debug_blks = None + self.debug_infos = None - def setup_code(self, data): - data, size_longs = self._pad_data(data) - self.seg_blk = HunkSegmentBlock(HUNK_CODE, data, size_longs) + def __repr__(self): + return "[seg=%s,symbol=%s,reloc=%s,debug=%s,debug_info=%s]" % ( + self._blk_str(self.seg_blk), + self._blk_str(self.symbol_blk), + self._blk_str_list(self.reloc_blks), + self._blk_str_list(self.debug_blks), + self._debug_infos_str(), + ) - def setup_data(self, data): - data, size_longs = self._pad_data(data) - self.seg_blk = HunkSegmentBlock(HUNK_DATA, data, size_longs) + def setup_code(self, data): + data, size_longs = self._pad_data(data) + self.seg_blk = HunkSegmentBlock(HUNK_CODE, data, size_longs) - def _pad_data(self, data): - size_bytes = len(data) - bytes_mod = size_bytes % 4 - if bytes_mod > 0: - add = 4 - bytes_mod - data = data + '\0' * add - size_long = int((size_bytes + 3)/4) - return data, size_long + def setup_data(self, data): + data, size_longs = self._pad_data(data) + self.seg_blk = HunkSegmentBlock(HUNK_DATA, data, size_longs) - def setup_bss(self, size_bytes): - size_longs = int((size_bytes + 3)/4) - self.seg_blk = HunkSegmentBlock(HUNK_BSS, None, size_longs) + def _pad_data(self, data): + size_bytes = len(data) + bytes_mod = size_bytes % 4 + if bytes_mod > 0: + add = 4 - bytes_mod + data = data + "\0" * add + size_long = int((size_bytes + 3) / 4) + return data, size_long - def setup_relocs(self, relocs, force_long=False): - """relocs: ((hunk_num, (off1, off2, ...)), ...)""" - if force_long: - use_short = False - else: - use_short = self._are_relocs_short(relocs) - if use_short: - self.reloc_blks = [HunkRelocWordBlock(HUNK_RELOC32SHORT, relocs)] - else: - self.reloc_blks = [HunkRelocLongBlock(HUNK_ABSRELOC32, relocs)] + def 
setup_bss(self, size_bytes): + size_longs = int((size_bytes + 3) / 4) + self.seg_blk = HunkSegmentBlock(HUNK_BSS, None, size_longs) - def setup_symbols(self, symbols): - """symbols: ((name, off), ...)""" - self.symbol_blk = HunkSymbolBlock(symbols) - - def setup_debug(self, debug_info): - if self.debug_infos is None: - self.debug_infos = [] - self.debug_infos.append(debug_info) - hd = HunkDebug() - debug_data = hd.encode(debug_info) - blk = HunkDebugBlock(debug_data) - if self.debug_blks is None: - self.debug_blks = [] - self.debug_blks.append(blk) + def setup_relocs(self, relocs, force_long=False): + """relocs: ((hunk_num, (off1, off2, ...)), ...)""" + if force_long: + use_short = False + else: + use_short = self._are_relocs_short(relocs) + if use_short: + self.reloc_blks = [HunkRelocWordBlock(HUNK_RELOC32SHORT, relocs)] + else: + self.reloc_blks = [HunkRelocLongBlock(HUNK_ABSRELOC32, relocs)] - def _are_relocs_short(self, relocs): - for hunk_num, offsets in relocs: - for off in offsets: - if off > 65535: - return False - return True + def setup_symbols(self, symbols): + """symbols: ((name, off), ...)""" + self.symbol_blk = HunkSymbolBlock(symbols) - def _debug_infos_str(self): - if self.debug_infos is None: - return "n/a" - else: - return ",".join(map(str, self.debug_infos)) + def setup_debug(self, debug_info): + if self.debug_infos is None: + self.debug_infos = [] + self.debug_infos.append(debug_info) + hd = HunkDebug() + debug_data = hd.encode(debug_info) + blk = HunkDebugBlock(debug_data) + if self.debug_blks is None: + self.debug_blks = [] + self.debug_blks.append(blk) - def _blk_str(self, blk): - if blk is None: - return "n/a" - else: - return hunk_names[blk.blk_id] + def _are_relocs_short(self, relocs): + for hunk_num, offsets in relocs: + for off in offsets: + if off > 65535: + return False + return True - def _blk_str_list(self, blk_list): - res = [] - if blk_list is None: - return "n/a" - for blk in blk_list: - res.append(hunk_names[blk.blk_id]) - return 
",".join(res) + def _debug_infos_str(self): + if self.debug_infos is None: + return "n/a" + else: + return ",".join(map(str, self.debug_infos)) - def parse(self, blocks): - hd = HunkDebug() - self.blocks = blocks - for blk in blocks: - blk_id = blk.blk_id - if blk_id in loadseg_valid_begin_hunks: - self.seg_blk = blk - elif blk_id == HUNK_SYMBOL: - if self.symbol_blk is None: - self.symbol_blk = blk + def _blk_str(self, blk): + if blk is None: + return "n/a" else: - raise HunkParserError("duplicate symbols in hunk") - elif blk_id == HUNK_DEBUG: - if self.debug_blks is None: - self.debug_blks = [] - self.debug_blks.append(blk) - # decode hunk debug info - debug_info = hd.decode(blk.debug_data) - if debug_info is not None: - if self.debug_infos is None: - self.debug_infos = [] - self.debug_infos.append(debug_info) - elif blk_id in (HUNK_ABSRELOC32, HUNK_RELOC32SHORT): - if self.reloc_blks is None: - self.reloc_blks = [] - self.reloc_blks.append(blk) - else: - raise HunkParseError("invalid hunk block") + return hunk_names[blk.blk_id] + + def _blk_str_list(self, blk_list): + res = [] + if blk_list is None: + return "n/a" + for blk in blk_list: + res.append(hunk_names[blk.blk_id]) + return ",".join(res) - def create(self, blocks): - # already has blocks? - if self.blocks is not None: - blocks += self.blocks - return self.seg_blk.size_longs - # start with segment block - if self.seg_blk is None: - raise HunkParseError("no segment block!") - self.blocks = [self.seg_blk] - # has relocations - if self.reloc_blks is not None: - self.blocks += self.reloc_blks - # has debug? - if self.debug_blks is not None: - self.blocks += self.debug_blks - # has symbols? 
- if self.symbol_blk is not None: - self.blocks.append(self.symbol_blk) - # store blocks - blocks += self.blocks - # return size of segment - return self.seg_blk.size_longs + def parse(self, blocks): + hd = HunkDebug() + self.blocks = blocks + for blk in blocks: + blk_id = blk.blk_id + if blk_id in loadseg_valid_begin_hunks: + self.seg_blk = blk + elif blk_id == HUNK_SYMBOL: + if self.symbol_blk is None: + self.symbol_blk = blk + else: + raise HunkParserError("duplicate symbols in hunk") + elif blk_id == HUNK_DEBUG: + if self.debug_blks is None: + self.debug_blks = [] + self.debug_blks.append(blk) + # decode hunk debug info + debug_info = hd.decode(blk.debug_data) + if debug_info is not None: + if self.debug_infos is None: + self.debug_infos = [] + self.debug_infos.append(debug_info) + elif blk_id in (HUNK_ABSRELOC32, HUNK_RELOC32SHORT): + if self.reloc_blks is None: + self.reloc_blks = [] + self.reloc_blks.append(blk) + else: + raise HunkParseError("invalid hunk block") + + def create(self, blocks): + # already has blocks? + if self.blocks is not None: + blocks += self.blocks + return self.seg_blk.size_longs + # start with segment block + if self.seg_blk is None: + raise HunkParseError("no segment block!") + self.blocks = [self.seg_blk] + # has relocations + if self.reloc_blks is not None: + self.blocks += self.reloc_blks + # has debug? + if self.debug_blks is not None: + self.blocks += self.debug_blks + # has symbols? 
+ if self.symbol_blk is not None: + self.blocks.append(self.symbol_blk) + # store blocks + blocks += self.blocks + # return size of segment + return self.seg_blk.size_longs class HunkLoadSegFile: - """manage a LoadSeg() hunk file starting with HUNK_HEADER""" - def __init__(self): - self.hdr_blk = None - self.segments = [] + """manage a LoadSeg() hunk file starting with HUNK_HEADER""" + + def __init__(self): + self.hdr_blk = None + self.segments = [] - def get_segments(self): - return self.segments + def get_segments(self): + return self.segments - def add_segment(self, seg): - self.segments.append(seg) + def add_segment(self, seg): + self.segments.append(seg) - def parse_block_file(self, bf): - """assign hunk blocks into segments""" - # get file blocks - blks = bf.get_blocks() - if blks is None or len(blks) == 0: - raise HunkParseError("no hunk blocks found!") - # ensure its a HUNK_HEADER - hdr_blk = blks[0] - if hdr_blk.blk_id != HUNK_HEADER: - raise HunkParseError("no HEADER block found!") - self.hdr_blk = hdr_blk - # first round: split block list into sections seperated by END - first = [] - cur = None - for blk in blks[1:]: - blk_id = blk.blk_id - # split by END block - if blk_id == HUNK_END: + def parse_block_file(self, bf): + """assign hunk blocks into segments""" + # get file blocks + blks = bf.get_blocks() + if blks is None or len(blks) == 0: + raise HunkParseError("no hunk blocks found!") + # ensure its a HUNK_HEADER + hdr_blk = blks[0] + if hdr_blk.blk_id != HUNK_HEADER: + raise HunkParseError("no HEADER block found!") + self.hdr_blk = hdr_blk + # first round: split block list into sections seperated by END + first = [] cur = None - # add non end block to list - else: - # check validity of block - if blk_id not in loadseg_valid_begin_hunks and \ - blk_id not in loadseg_valid_extra_hunks: - raise HunkParseError("invalid block found: %d" % blk_id) - if cur is None: - cur = [] - first.append(cur) - cur.append(blk) - # second round: split list if two segments 
are found in a single list - # this is only necessary for broken files that lack END blocks - second = [] - for l in first: - pos_seg = [] - off = 0 - for blk in l: - if blk.blk_id in loadseg_valid_begin_hunks: - pos_seg.append(off) - off+=1 - n = len(pos_seg) - if n == 1: - # list is ok - second.append(l) - elif n > 1: - # list needs split - # we can only split if no extra block is before next segment block - new_list = None - for blk in l: - if blk.blk_id in loadseg_valid_begin_hunks: - new_list = [blk] - second.append(new_list) - elif new_list is not None: - new_list.append(blk) - else: - raise HunkParseError("can't split block list") - # check size of hunk table - if len(hdr_blk.hunk_table) != len(second): - raise HunkParseError("can't match hunks to header") - # convert block lists into segments - for l in second: - seg = HunkSegment() - seg.parse(l) - self.segments.append(seg) - # set size in segments - n = len(second) - for i in range(n): - self.segments[i].size_longs = hdr_blk.hunk_table[i] - self.segments[i].size = self.segments[i].size_longs * 4 + for blk in blks[1:]: + blk_id = blk.blk_id + # split by END block + if blk_id == HUNK_END: + cur = None + # add non end block to list + else: + # check validity of block + if ( + blk_id not in loadseg_valid_begin_hunks + and blk_id not in loadseg_valid_extra_hunks + ): + raise HunkParseError("invalid block found: %d" % blk_id) + if cur is None: + cur = [] + first.append(cur) + cur.append(blk) + # second round: split list if two segments are found in a single list + # this is only necessary for broken files that lack END blocks + second = [] + for l in first: + pos_seg = [] + off = 0 + for blk in l: + if blk.blk_id in loadseg_valid_begin_hunks: + pos_seg.append(off) + off += 1 + n = len(pos_seg) + if n == 1: + # list is ok + second.append(l) + elif n > 1: + # list needs split + # we can only split if no extra block is before next segment block + new_list = None + for blk in l: + if blk.blk_id in 
loadseg_valid_begin_hunks: + new_list = [blk] + second.append(new_list) + elif new_list is not None: + new_list.append(blk) + else: + raise HunkParseError("can't split block list") + # check size of hunk table + if len(hdr_blk.hunk_table) != len(second): + raise HunkParseError("can't match hunks to header") + # convert block lists into segments + for l in second: + seg = HunkSegment() + seg.parse(l) + self.segments.append(seg) + # set size in segments + n = len(second) + for i in range(n): + self.segments[i].size_longs = hdr_blk.hunk_table[i] + self.segments[i].size = self.segments[i].size_longs * 4 - def create_block_file(self): - """create a HunkBlockFile from the segments given""" - # setup header block - self.hdr_blk = HunkHeaderBlock() - blks = [self.hdr_blk] - sizes = [] - for seg in self.segments: - size = seg.create(blks) - sizes.append(size) - # add HUNK_END - blks.append(HunkEndBlock()) - # finally setup header - self.hdr_blk.setup(sizes) - # create HunkBlockFile - return HunkBlockFile(blks) + def create_block_file(self): + """create a HunkBlockFile from the segments given""" + # setup header block + self.hdr_blk = HunkHeaderBlock() + blks = [self.hdr_blk] + sizes = [] + for seg in self.segments: + size = seg.create(blks) + sizes.append(size) + # add HUNK_END + blks.append(HunkEndBlock()) + # finally setup header + self.hdr_blk.setup(sizes) + # create HunkBlockFile + return HunkBlockFile(blks) # mini test -if __name__ == '__main__': - import sys - for a in sys.argv[1:]: - bf = HunkBlockFile() - bf.read_path(a, isLoadSeg=True) - print(bf.get_block_type_names()) - lsf = HunkLoadSegFile() - lsf.parse_block_file(bf) - print(lsf.get_segments()) - # write back - new_bf = lsf.create_block_file() - new_bf.write_path("a.out") - # compare read and written stream - with open(a, "rb") as fh: - data = fh.read() - with open("a.out", "rb") as fh: - new_data = fh.read() - if len(data) != len(new_data): - print("MISMATCH", len(data), len(new_data)) - else: - for i in 
range(len(data)): - if data[i] != new_data[i]: - print("MISMATCH @%x" % i) - print("OK") +if __name__ == "__main__": + import sys + + for a in sys.argv[1:]: + bf = HunkBlockFile() + bf.read_path(a, isLoadSeg=True) + print(bf.get_block_type_names()) + lsf = HunkLoadSegFile() + lsf.parse_block_file(bf) + print(lsf.get_segments()) + # write back + new_bf = lsf.create_block_file() + new_bf.write_path("a.out") + # compare read and written stream + with open(a, "rb") as fh: + data = fh.read() + with open("a.out", "rb") as fh: + new_data = fh.read() + if len(data) != len(new_data): + print("MISMATCH", len(data), len(new_data)) + else: + for i in range(len(data)): + if data[i] != new_data[i]: + print("MISMATCH @%x" % i) + print("OK") diff --git a/amitools/binfmt/hunk/HunkReader.py b/amitools/binfmt/hunk/HunkReader.py index 9b4b1470..f4a0edfd 100644 --- a/amitools/binfmt/hunk/HunkReader.py +++ b/amitools/binfmt/hunk/HunkReader.py @@ -6,965 +6,1055 @@ from types import * from .Hunk import * + class HunkReader: - """Load Amiga executable Hunk structures""" - - def __init__(self): - self.hunks = [] - self.error_string = None - self.type = None - self.header = None - self.segments = [] - self.overlay = None - self.overlay_headers = None - self.overlay_segments = None - self.libs = None - self.units = None - - def get_struct_summary(self, obj): - if type(obj) == ListType: - result = [] - for a in obj: - v = self.get_struct_summary(a) - if v != None: - result.append(v) - return "[" + ",".join(result) + "]" - elif type(obj) == DictType: - if 'type_name' in obj: - type_name = obj['type_name'] - return type_name.replace('HUNK_','') - else: - result = [] - for k in list(obj.keys()): - v = self.get_struct_summary(obj[k]) - if v != None: - result.append(k + ":" + v) - return '{' + ",".join(result) + '}' - else: - return None - - def get_long(self, data): - return struct.unpack(">I",data)[0] - - def read_long(self, f): - data = f.read(4) - if len(data) == 0: - return -1 - elif len(data) 
!= 4: - return -(len(data)+1) - return struct.unpack(">I",data)[0] - - def read_word(self, f): - data = f.read(2) - if len(data) == 0: - return -1 - elif len(data) != 2: - return -(len(data)+1) - return struct.unpack(">H",data)[0] - - def read_name(self, f): - num_longs = self.read_long(f) - if num_longs < 0: - return -1,None - elif num_longs == 0: - return 0,"" - else: - return self.read_name_size(f, num_longs) - - def read_tag(self, f): - data = f.read(4) - if len(data) == 0: - return -1; - elif len(data) != 4: - return -(len(data)+1) - return data - - def read_name_size(self, f, num_longs): - size = (num_longs & 0xffffff) * 4 - data = f.read(size) - if len(data) < size: - return -1,None - endpos = data.find(b'\0') - if endpos == -1: - return size,data.decode('latin-1') - elif endpos == 0: - return 0,"" - else: - return size,data[:endpos].decode('latin-1') - - def get_index_name(self, strtab, offset): - end = strtab.find(b'\0',offset) - if end == -1: - return strtab[offset:].decode('latin-1') - else: - return strtab[offset:end].decode('latin-1') - - def is_valid_first_hunk_type(self, hunk_type): - return hunk_type == HUNK_HEADER or hunk_type == HUNK_LIB or hunk_type == HUNK_UNIT - - def parse_header(self, f, hunk): - names = [] - hunk['names'] = names - while True: - l,s = self.read_name(f) - if l < 0: - self.error_string = "Error parsing HUNK_HEADER names" - return RESULT_INVALID_HUNK_FILE - elif l == 0: - break - names.append(s) - - # table size and hunk range - table_size = self.read_long(f) - first_hunk = self.read_long(f) - last_hunk = self.read_long(f) - if table_size < 0 or first_hunk < 0 or last_hunk < 0: - self.error_string = "HUNK_HEADER invalid table_size or first_hunk or last_hunk" - return RESULT_INVALID_HUNK_FILE - - hunk['table_size'] = table_size - hunk['first_hunk'] = first_hunk - hunk['last_hunk'] = last_hunk - - # determine number of hunks in size table - num_hunks = last_hunk - first_hunk + 1 - hunk_table = [] - for a in range(num_hunks): - 
hunk_info = {} - hunk_size = self.read_long(f) - if hunk_size < 0: - self.error_string = "HUNK_HEADER contains invalid hunk_size" - return RESULT_INVALID_HUNK_FILE - hunk_bytes = hunk_size & ~HUNKF_ALL - hunk_bytes *= 4 # longs to bytes - hunk_info['size'] = hunk_bytes - self.set_mem_flags(hunk_info, hunk_size & HUNKF_ALL, 30) - hunk_table.append(hunk_info) - hunk['hunks'] = hunk_table - return RESULT_OK - - def parse_code_or_data(self, f, hunk): - num_longs = self.read_long(f) - if num_longs < 0: - self.error_string = "%s has invalid size" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - - # read in hunk data - size = num_longs * 4 - - hunk['size'] = size & ~HUNKF_ALL - flags = size & HUNKF_ALL - self.set_mem_flags(hunk, flags, 30) - hunk['data_file_offset'] = f.tell() - data = f.read(hunk['size']) - hunk['data'] = data - return RESULT_OK - - def parse_bss(self, f, hunk): - num_longs = self.read_long(f) - if num_longs < 0: - self.error_string = "%s has invalid size" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - - # read in hunk data - size = num_longs * 4 - - hunk['size'] = size & ~HUNKF_ALL - flags = size & HUNKF_ALL - self.set_mem_flags(hunk, flags, 30) - return RESULT_OK - - def parse_reloc(self, f, hunk): - num_relocs = 1 - reloc = {} - hunk['reloc'] = reloc - while num_relocs != 0: - num_relocs = self.read_long(f) - if num_relocs < 0: - self.error_string = "%s has invalid number of relocations" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - elif num_relocs == 0: - # last relocation found - break - - # build reloc map - hunk_num = self.read_long(f) - if hunk_num < 0: - self.error_string = "%s has invalid hunk num" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - - offsets = [] - for a in range(num_relocs & 0xffff): - offset = self.read_long(f) - if offset < 0: - self.error_string = "%s has invalid relocation #%d offset %d (num_relocs=%d hunk_num=%d, offset=%d)" \ - % 
(hunk['type_name'],a,offset,num_relocs,hunk_num,f.tell()) - return RESULT_INVALID_HUNK_FILE - offsets.append(offset) - reloc[hunk_num] = offsets - return RESULT_OK - - def parse_reloc_short(self, f, hunk): - num_relocs = 1 - reloc = {} - hunk['reloc'] = reloc - total_words = 0 - while num_relocs != 0: - num_relocs = self.read_word(f) - if num_relocs < 0: - self.error_string = "%s has invalid number of relocations" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - elif num_relocs == 0: - # last relocation found - total_words += 1 - break - - # build reloc map - hunk_num = self.read_word(f) - if hunk_num < 0: - self.error_string = "%s has invalid hunk num" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - - offsets = [] - count = num_relocs & 0xffff - total_words += count + 2 - for a in range(count): - offset = self.read_word(f) - if offset < 0: - self.error_string = "%s has invalid relocation #%d offset %d (num_relocs=%d hunk_num=%d, offset=%d)" \ - % (hunk['type_name'],a,offset,num_relocs,hunk_num,f.tell()) - return RESULT_INVALID_HUNK_FILE - offsets.append(offset) - reloc[hunk_num] = offsets - - # padding - if total_words & 1 == 1: - self.read_word(f) - return RESULT_OK - - def parse_symbol(self, f, hunk): - name_len = 1 - symbols = [] - hunk['symbols'] = symbols - while name_len > 0: - (name_len, name) = self.read_name(f) - if name_len < 0: - self.error_string = "%s has invalid symbol name" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - elif name_len == 0: - # last name occurred - break - value = self.read_long(f) - if value < 0: - self.error_string = "%s has invalid symbol value" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - symbols.append( (name,value) ) - return RESULT_OK - - def parse_debug(self, f, hunk): - num_longs = self.read_long(f) - if num_longs < 0: - self.error_string = "%s has invalid size" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - size = num_longs * 4 - - offset = self.read_long(f) - 
hunk['debug_offset'] = offset; - tag = self.read_tag(f) - hunk['debug_type'] = tag; - size -= 8 - - if tag == 'LINE': - # parse LINE: source line -> code offset mapping - l = self.read_long(f) - size -= l * 4 + 4; - l,n = self.read_name_size(f,l) - src_map = [] - hunk['src_file'] = n - hunk['src_map'] = src_map - while size > 0: - line_no = self.read_long(f) + """Load Amiga executable Hunk structures""" + + def __init__(self): + self.hunks = [] + self.error_string = None + self.type = None + self.header = None + self.segments = [] + self.overlay = None + self.overlay_headers = None + self.overlay_segments = None + self.libs = None + self.units = None + + def get_struct_summary(self, obj): + if type(obj) == ListType: + result = [] + for a in obj: + v = self.get_struct_summary(a) + if v != None: + result.append(v) + return "[" + ",".join(result) + "]" + elif type(obj) == DictType: + if "type_name" in obj: + type_name = obj["type_name"] + return type_name.replace("HUNK_", "") + else: + result = [] + for k in list(obj.keys()): + v = self.get_struct_summary(obj[k]) + if v != None: + result.append(k + ":" + v) + return "{" + ",".join(result) + "}" + else: + return None + + def get_long(self, data): + return struct.unpack(">I", data)[0] + + def read_long(self, f): + data = f.read(4) + if len(data) == 0: + return -1 + elif len(data) != 4: + return -(len(data) + 1) + return struct.unpack(">I", data)[0] + + def read_word(self, f): + data = f.read(2) + if len(data) == 0: + return -1 + elif len(data) != 2: + return -(len(data) + 1) + return struct.unpack(">H", data)[0] + + def read_name(self, f): + num_longs = self.read_long(f) + if num_longs < 0: + return -1, None + elif num_longs == 0: + return 0, "" + else: + return self.read_name_size(f, num_longs) + + def read_tag(self, f): + data = f.read(4) + if len(data) == 0: + return -1 + elif len(data) != 4: + return -(len(data) + 1) + return data + + def read_name_size(self, f, num_longs): + size = (num_longs & 0xFFFFFF) * 4 + data 
= f.read(size) + if len(data) < size: + return -1, None + endpos = data.find(b"\0") + if endpos == -1: + return size, data.decode("latin-1") + elif endpos == 0: + return 0, "" + else: + return size, data[:endpos].decode("latin-1") + + def get_index_name(self, strtab, offset): + end = strtab.find(b"\0", offset) + if end == -1: + return strtab[offset:].decode("latin-1") + else: + return strtab[offset:end].decode("latin-1") + + def is_valid_first_hunk_type(self, hunk_type): + return ( + hunk_type == HUNK_HEADER or hunk_type == HUNK_LIB or hunk_type == HUNK_UNIT + ) + + def parse_header(self, f, hunk): + names = [] + hunk["names"] = names + while True: + l, s = self.read_name(f) + if l < 0: + self.error_string = "Error parsing HUNK_HEADER names" + return RESULT_INVALID_HUNK_FILE + elif l == 0: + break + names.append(s) + + # table size and hunk range + table_size = self.read_long(f) + first_hunk = self.read_long(f) + last_hunk = self.read_long(f) + if table_size < 0 or first_hunk < 0 or last_hunk < 0: + self.error_string = ( + "HUNK_HEADER invalid table_size or first_hunk or last_hunk" + ) + return RESULT_INVALID_HUNK_FILE + + hunk["table_size"] = table_size + hunk["first_hunk"] = first_hunk + hunk["last_hunk"] = last_hunk + + # determine number of hunks in size table + num_hunks = last_hunk - first_hunk + 1 + hunk_table = [] + for a in range(num_hunks): + hunk_info = {} + hunk_size = self.read_long(f) + if hunk_size < 0: + self.error_string = "HUNK_HEADER contains invalid hunk_size" + return RESULT_INVALID_HUNK_FILE + hunk_bytes = hunk_size & ~HUNKF_ALL + hunk_bytes *= 4 # longs to bytes + hunk_info["size"] = hunk_bytes + self.set_mem_flags(hunk_info, hunk_size & HUNKF_ALL, 30) + hunk_table.append(hunk_info) + hunk["hunks"] = hunk_table + return RESULT_OK + + def parse_code_or_data(self, f, hunk): + num_longs = self.read_long(f) + if num_longs < 0: + self.error_string = "%s has invalid size" % (hunk["type_name"]) + return RESULT_INVALID_HUNK_FILE + + # read in hunk 
data + size = num_longs * 4 + + hunk["size"] = size & ~HUNKF_ALL + flags = size & HUNKF_ALL + self.set_mem_flags(hunk, flags, 30) + hunk["data_file_offset"] = f.tell() + data = f.read(hunk["size"]) + hunk["data"] = data + return RESULT_OK + + def parse_bss(self, f, hunk): + num_longs = self.read_long(f) + if num_longs < 0: + self.error_string = "%s has invalid size" % (hunk["type_name"]) + return RESULT_INVALID_HUNK_FILE + + # read in hunk data + size = num_longs * 4 + + hunk["size"] = size & ~HUNKF_ALL + flags = size & HUNKF_ALL + self.set_mem_flags(hunk, flags, 30) + return RESULT_OK + + def parse_reloc(self, f, hunk): + num_relocs = 1 + reloc = {} + hunk["reloc"] = reloc + while num_relocs != 0: + num_relocs = self.read_long(f) + if num_relocs < 0: + self.error_string = "%s has invalid number of relocations" % ( + hunk["type_name"] + ) + return RESULT_INVALID_HUNK_FILE + elif num_relocs == 0: + # last relocation found + break + + # build reloc map + hunk_num = self.read_long(f) + if hunk_num < 0: + self.error_string = "%s has invalid hunk num" % (hunk["type_name"]) + return RESULT_INVALID_HUNK_FILE + + offsets = [] + for a in range(num_relocs & 0xFFFF): + offset = self.read_long(f) + if offset < 0: + self.error_string = ( + "%s has invalid relocation #%d offset %d (num_relocs=%d hunk_num=%d, offset=%d)" + % (hunk["type_name"], a, offset, num_relocs, hunk_num, f.tell()) + ) + return RESULT_INVALID_HUNK_FILE + offsets.append(offset) + reloc[hunk_num] = offsets + return RESULT_OK + + def parse_reloc_short(self, f, hunk): + num_relocs = 1 + reloc = {} + hunk["reloc"] = reloc + total_words = 0 + while num_relocs != 0: + num_relocs = self.read_word(f) + if num_relocs < 0: + self.error_string = "%s has invalid number of relocations" % ( + hunk["type_name"] + ) + return RESULT_INVALID_HUNK_FILE + elif num_relocs == 0: + # last relocation found + total_words += 1 + break + + # build reloc map + hunk_num = self.read_word(f) + if hunk_num < 0: + self.error_string = "%s has 
invalid hunk num" % (hunk["type_name"]) + return RESULT_INVALID_HUNK_FILE + + offsets = [] + count = num_relocs & 0xFFFF + total_words += count + 2 + for a in range(count): + offset = self.read_word(f) + if offset < 0: + self.error_string = ( + "%s has invalid relocation #%d offset %d (num_relocs=%d hunk_num=%d, offset=%d)" + % (hunk["type_name"], a, offset, num_relocs, hunk_num, f.tell()) + ) + return RESULT_INVALID_HUNK_FILE + offsets.append(offset) + reloc[hunk_num] = offsets + + # padding + if total_words & 1 == 1: + self.read_word(f) + return RESULT_OK + + def parse_symbol(self, f, hunk): + name_len = 1 + symbols = [] + hunk["symbols"] = symbols + while name_len > 0: + (name_len, name) = self.read_name(f) + if name_len < 0: + self.error_string = "%s has invalid symbol name" % (hunk["type_name"]) + return RESULT_INVALID_HUNK_FILE + elif name_len == 0: + # last name occurred + break + value = self.read_long(f) + if value < 0: + self.error_string = "%s has invalid symbol value" % (hunk["type_name"]) + return RESULT_INVALID_HUNK_FILE + symbols.append((name, value)) + return RESULT_OK + + def parse_debug(self, f, hunk): + num_longs = self.read_long(f) + if num_longs < 0: + self.error_string = "%s has invalid size" % (hunk["type_name"]) + return RESULT_INVALID_HUNK_FILE + size = num_longs * 4 + offset = self.read_long(f) + hunk["debug_offset"] = offset + tag = self.read_tag(f) + hunk["debug_type"] = tag size -= 8 - src_map.append([line_no,offset]) - else: - # read unknown DEBUG hunk - hunk['data'] = f.read(size) - return RESULT_OK - - def find_first_code_hunk(self): - for hunk in self.hunks: - if hunk['type'] == HUNK_CODE: - return hunk - return None - - def parse_overlay(self, f, hunk): - # read size of overlay hunk - ov_size = self.read_long(f) - if ov_size < 0: - self.error_string = "%s has invalid size" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - - # read data of overlay - byte_size = (ov_size + 1) *4 - ov_data = f.read(byte_size) - hunk['ov_data'] 
= ov_data - - # check: first get header hunk - hdr_hunk = self.hunks[0] - if hdr_hunk['type'] != HUNK_HEADER: - self.error_string = "%s has no header hunk" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - - # first find the code segment of the overlay manager - overlay_mgr_hunk = self.find_first_code_hunk() - if overlay_mgr_hunk == None: - self.error_string = "%s has no overlay manager hunk" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - - # check overlay manager - overlay_mgr_data = overlay_mgr_hunk['data'] - magic = self.get_long(overlay_mgr_data[4:8]) - if magic != 0xabcd: - self.error_string = "no valid overlay manager magic found" - return RESULT_INVALID_HUNK_FILE - - # check for standard overlay manager - magic2 = self.get_long(overlay_mgr_data[24:28]) - magic3 = self.get_long(overlay_mgr_data[28:32]) - magic4 = self.get_long(overlay_mgr_data[32:36]) - std_overlay = (magic2 == 0x5ba0) and (magic3 == 0x074f7665) and (magic4 == 0x726c6179) - hunk['ov_std'] = std_overlay - - return RESULT_OK - - def parse_lib(self, f, hunk): - lib_size = self.read_long(f) - hunk['lib_file_offset'] = f.tell() - return RESULT_OK,lib_size * 4 - - def parse_index(self, f, hunk): - index_size = self.read_long(f) - total_size = index_size * 4 - - # first read string table - strtab_size = self.read_word(f) - strtab = f.read(strtab_size) - total_size -= strtab_size + 2 - - # read units - units = [] - hunk['units'] = units - unit_no = 0 - while total_size > 2: - # read name of unit - name_offset = self.read_word(f) - total_size -= 2 - - unit = {} - units.append(unit) - unit['unit_no'] = unit_no - unit_no += 1 - - # generate unit name - unit['name'] = self.get_index_name(strtab, name_offset) - - # hunks in unit - hunk_begin = self.read_word(f) - num_hunks = self.read_word(f) - total_size -= 4 - unit['hunk_begin_offset'] = hunk_begin - - # for all hunks in unit - ihunks = [] - unit['hunk_infos'] = ihunks - for a in range(num_hunks): - ihunk = {} - ihunks.append(ihunk) - 
- # get hunk info - name_offset = self.read_word(f) - hunk_size = self.read_word(f) - hunk_type = self.read_word(f) - total_size -= 6 - ihunk['name'] = self.get_index_name(strtab, name_offset) - ihunk['size'] = hunk_size - ihunk['type'] = hunk_type & 0x3fff - self.set_mem_flags(ihunk,hunk_type & 0xc000,14) - ihunk['type_name'] = hunk_names[hunk_type & 0x3fff] - - # get references - num_refs = self.read_word(f) - total_size -= 2 - if num_refs > 0: - refs = [] - ihunk['refs'] = refs - for b in range(num_refs): - ref = {} + + if tag == "LINE": + # parse LINE: source line -> code offset mapping + l = self.read_long(f) + size -= l * 4 + 4 + l, n = self.read_name_size(f, l) + src_map = [] + hunk["src_file"] = n + hunk["src_map"] = src_map + while size > 0: + line_no = self.read_long(f) + offset = self.read_long(f) + size -= 8 + src_map.append([line_no, offset]) + else: + # read unknown DEBUG hunk + hunk["data"] = f.read(size) + return RESULT_OK + + def find_first_code_hunk(self): + for hunk in self.hunks: + if hunk["type"] == HUNK_CODE: + return hunk + return None + + def parse_overlay(self, f, hunk): + # read size of overlay hunk + ov_size = self.read_long(f) + if ov_size < 0: + self.error_string = "%s has invalid size" % (hunk["type_name"]) + return RESULT_INVALID_HUNK_FILE + + # read data of overlay + byte_size = (ov_size + 1) * 4 + ov_data = f.read(byte_size) + hunk["ov_data"] = ov_data + + # check: first get header hunk + hdr_hunk = self.hunks[0] + if hdr_hunk["type"] != HUNK_HEADER: + self.error_string = "%s has no header hunk" % (hunk["type_name"]) + return RESULT_INVALID_HUNK_FILE + + # first find the code segment of the overlay manager + overlay_mgr_hunk = self.find_first_code_hunk() + if overlay_mgr_hunk == None: + self.error_string = "%s has no overlay manager hunk" % (hunk["type_name"]) + return RESULT_INVALID_HUNK_FILE + + # check overlay manager + overlay_mgr_data = overlay_mgr_hunk["data"] + magic = self.get_long(overlay_mgr_data[4:8]) + if magic != 
0xABCD: + self.error_string = "no valid overlay manager magic found" + return RESULT_INVALID_HUNK_FILE + + # check for standard overlay manager + magic2 = self.get_long(overlay_mgr_data[24:28]) + magic3 = self.get_long(overlay_mgr_data[28:32]) + magic4 = self.get_long(overlay_mgr_data[32:36]) + std_overlay = ( + (magic2 == 0x5BA0) and (magic3 == 0x074F7665) and (magic4 == 0x726C6179) + ) + hunk["ov_std"] = std_overlay + + return RESULT_OK + + def parse_lib(self, f, hunk): + lib_size = self.read_long(f) + hunk["lib_file_offset"] = f.tell() + return RESULT_OK, lib_size * 4 + + def parse_index(self, f, hunk): + index_size = self.read_long(f) + total_size = index_size * 4 + + # first read string table + strtab_size = self.read_word(f) + strtab = f.read(strtab_size) + total_size -= strtab_size + 2 + + # read units + units = [] + hunk["units"] = units + unit_no = 0 + while total_size > 2: + # read name of unit name_offset = self.read_word(f) total_size -= 2 - name = self.get_index_name(strtab, name_offset) - if name == '': - # 16 bit refs point to the previous zero byte before the string entry... 
- name = self.get_index_name(strtab, name_offset+1) - ref['bits'] = 16 + + unit = {} + units.append(unit) + unit["unit_no"] = unit_no + unit_no += 1 + + # generate unit name + unit["name"] = self.get_index_name(strtab, name_offset) + + # hunks in unit + hunk_begin = self.read_word(f) + num_hunks = self.read_word(f) + total_size -= 4 + unit["hunk_begin_offset"] = hunk_begin + + # for all hunks in unit + ihunks = [] + unit["hunk_infos"] = ihunks + for a in range(num_hunks): + ihunk = {} + ihunks.append(ihunk) + + # get hunk info + name_offset = self.read_word(f) + hunk_size = self.read_word(f) + hunk_type = self.read_word(f) + total_size -= 6 + ihunk["name"] = self.get_index_name(strtab, name_offset) + ihunk["size"] = hunk_size + ihunk["type"] = hunk_type & 0x3FFF + self.set_mem_flags(ihunk, hunk_type & 0xC000, 14) + ihunk["type_name"] = hunk_names[hunk_type & 0x3FFF] + + # get references + num_refs = self.read_word(f) + total_size -= 2 + if num_refs > 0: + refs = [] + ihunk["refs"] = refs + for b in range(num_refs): + ref = {} + name_offset = self.read_word(f) + total_size -= 2 + name = self.get_index_name(strtab, name_offset) + if name == "": + # 16 bit refs point to the previous zero byte before the string entry... 
+ name = self.get_index_name(strtab, name_offset + 1) + ref["bits"] = 16 + else: + ref["bits"] = 32 + ref["name"] = name + refs.append(ref) + + # get definitions + num_defs = self.read_word(f) + total_size -= 2 + if num_defs > 0: + defs = [] + ihunk["defs"] = defs + for b in range(num_defs): + name_offset = self.read_word(f) + def_value = self.read_word(f) + def_type_flags = self.read_word(f) + def_type = def_type_flags & 0x3FFF + def_flags = def_type_flags & 0xC000 + total_size -= 6 + name = self.get_index_name(strtab, name_offset) + d = {"name": name, "value": def_value, "type": def_type} + self.set_mem_flags(d, def_flags, 14) + defs.append(d) + + # align hunk + if total_size == 2: + self.read_word(f) + elif total_size != 0: + self.error_string = "%s has invalid padding: %d" % ( + hunk["type_name"], + total_size, + ) + return RESULT_INVALID_HUNK_FILE + return RESULT_OK + + def parse_ext(self, f, hunk): + ext_def = [] + ext_ref = [] + ext_common = [] + hunk["ext_def"] = ext_def + hunk["ext_ref"] = ext_ref + hunk["ext_common"] = ext_common + ext_type_size = 1 + while ext_type_size > 0: + # ext type | size + ext_type_size = self.read_long(f) + if ext_type_size < 0: + self.error_string = "%s has invalid size" % (hunk["type_name"]) + return RESULT_INVALID_HUNK_FILE + ext_type = ext_type_size >> EXT_TYPE_SHIFT + ext_size = ext_type_size & EXT_TYPE_SIZE_MASK + + # ext name + l, ext_name = self.read_name_size(f, ext_size) + if l < 0: + self.error_string = "%s has invalid name" % (hunk["type_name"]) + return RESULT_INVALID_HUNK_FILE + elif l == 0: + break + + # create local ext object + ext = {"type": ext_type, "name": ext_name} + + # check and setup type name + if ext_type not in ext_names: + self.error_string = "%s has unspported ext entry %d" % ( + hunk["type_name"], + ext_type, + ) + return RESULT_INVALID_HUNK_FILE + ext["type_name"] = ext_names[ext_type] + + # ext common + if ext_type == EXT_ABSCOMMON or ext_type == EXT_RELCOMMON: + ext["common_size"] = 
self.read_long(f) + ext_common.append(ext) + # ext def + elif ext_type == EXT_DEF or ext_type == EXT_ABS or ext_type == EXT_RES: + ext["def"] = self.read_long(f) + ext_def.append(ext) + # ext ref else: - ref['bits'] = 32 - ref['name'] = name - refs.append(ref) - - # get definitions - num_defs = self.read_word(f) - total_size -= 2 - if num_defs > 0: - defs = [] - ihunk['defs'] = defs - for b in range(num_defs): - name_offset = self.read_word(f) - def_value = self.read_word(f) - def_type_flags = self.read_word(f) - def_type = def_type_flags & 0x3fff - def_flags = def_type_flags & 0xc000 - total_size -= 6 - name = self.get_index_name(strtab, name_offset) - d = { 'name':name, 'value':def_value,'type':def_type} - self.set_mem_flags(d,def_flags,14) - defs.append(d) - - # align hunk - if total_size == 2: - self.read_word(f) - elif total_size != 0: - self.error_string = "%s has invalid padding: %d" % (hunk['type_name'], total_size) - return RESULT_INVALID_HUNK_FILE - return RESULT_OK - - def parse_ext(self, f, hunk): - ext_def = [] - ext_ref = [] - ext_common = [] - hunk['ext_def'] = ext_def - hunk['ext_ref'] = ext_ref - hunk['ext_common'] = ext_common - ext_type_size = 1 - while ext_type_size > 0: - # ext type | size - ext_type_size = self.read_long(f) - if ext_type_size < 0: - self.error_string = "%s has invalid size" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - ext_type = ext_type_size >> EXT_TYPE_SHIFT - ext_size = ext_type_size & EXT_TYPE_SIZE_MASK - - # ext name - l,ext_name = self.read_name_size(f, ext_size) - if l < 0: - self.error_string = "%s has invalid name" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - elif l == 0: - break - - # create local ext object - ext = { 'type' : ext_type, 'name' : ext_name } - - # check and setup type name - if ext_type not in ext_names: - self.error_string = "%s has unspported ext entry %d" % (hunk['type_name'],ext_type) - return RESULT_INVALID_HUNK_FILE - ext['type_name'] = ext_names[ext_type] - - # ext 
common - if ext_type == EXT_ABSCOMMON or ext_type == EXT_RELCOMMON: - ext['common_size'] = self.read_long(f) - ext_common.append(ext) - # ext def - elif ext_type == EXT_DEF or ext_type == EXT_ABS or ext_type == EXT_RES: - ext['def'] = self.read_long(f) - ext_def.append(ext) - # ext ref - else: - num_refs = self.read_long(f) - if num_refs == 0: - num_refs = 1 - refs = [] - for a in range(num_refs): - ref = self.read_long(f) - refs.append(ref) - ext['refs'] = refs - ext_ref.append(ext) - - return RESULT_OK - - def parse_unit_or_name(self, f, hunk): - l,n = self.read_name(f) - if l < 0: - self.error_string = "%s has invalid name" % (hunk['type_name']) - return RESULT_INVALID_HUNK_FILE - elif l > 0: - hunk['name'] = n - else: - hunk['name'] = "" - return RESULT_OK - - def set_mem_flags(self, hunk, flags, shift): - f = flags >> shift - if f & 1 == 1: - hunk['memf'] = 'chip' - elif f & 2 == 2: - hunk['memf'] = 'fast' - else: - hunk['memf'] = '' - - # ----- public functions ----- - - """Read a hunk file and build internal hunk structure - Return status and set self.error_string on failure - """ - def read_file(self, hfile): - with open(hfile, "rb") as f: - return self.read_file_obj(hfile, f) - - """Read a hunk from memory""" - def read_mem(self, name, data): - fobj = io.StringIO(data) - return self.read_file_obj(name, fobj) - - def read_file_obj(self, hfile, f): - self.hunks = [] - is_first_hunk = True - is_exe = False - was_end = False - was_overlay = False - self.error_string = None - lib_size = 0 - last_file_offset = 0 - - while True: - hunk_file_offset = f.tell() - - # read hunk type - hunk_raw_type = self.read_long(f) - if hunk_raw_type == -1 or hunk_raw_type == -2: # tolerate extra byte at end - if is_first_hunk: - self.error_string = "No valid hunk file: '%s' is empty" % (hfile) - return RESULT_NO_HUNK_FILE - else: - # eof - break - elif hunk_raw_type < 0: - if is_first_hunk: - self.error_string = "No valid hunk file: '%s' is too short" % (hfile) - return 
RESULT_NO_HUNK_FILE + num_refs = self.read_long(f) + if num_refs == 0: + num_refs = 1 + refs = [] + for a in range(num_refs): + ref = self.read_long(f) + refs.append(ref) + ext["refs"] = refs + ext_ref.append(ext) + + return RESULT_OK + + def parse_unit_or_name(self, f, hunk): + l, n = self.read_name(f) + if l < 0: + self.error_string = "%s has invalid name" % (hunk["type_name"]) + return RESULT_INVALID_HUNK_FILE + elif l > 0: + hunk["name"] = n else: - self.error_string = "Error reading hunk type @%08x" % (f.tell()) - return RESULT_INVALID_HUNK_FILE - - hunk_type = hunk_raw_type & HUNK_TYPE_MASK - hunk_flags = hunk_raw_type & HUNK_FLAGS_MASK - - # check range of hunk type - if hunk_type not in hunk_names: - # no hunk file? - if is_first_hunk: - self.error_string = "No hunk file: '%s' type was %d" % (hfile, hunk_type) - return RESULT_NO_HUNK_FILE - elif was_end: - # garbage after an end tag is ignored - return RESULT_OK - elif was_overlay: - # seems to be a custom overlay -> read to end of file - ov_custom_data = f.read() - self.hunks[-1]['custom_data'] = ov_custom_data - return RESULT_OK + hunk["name"] = "" + return RESULT_OK + + def set_mem_flags(self, hunk, flags, shift): + f = flags >> shift + if f & 1 == 1: + hunk["memf"] = "chip" + elif f & 2 == 2: + hunk["memf"] = "fast" else: - self.error_string = "Invalid hunk type %d/%x found at @%08x" % (hunk_type,hunk_type,f.tell()) - return RESULT_INVALID_HUNK_FILE - else: - # check for valid first hunk type - if is_first_hunk: - if not self.is_valid_first_hunk_type(hunk_type): - self.error_string = "No hunk file: '%s' first hunk type was %d" % (hfile, hunk_type) - return RESULT_NO_HUNK_FILE - else: - is_exe = hunk_type == HUNK_HEADER - - is_first_hunk = False - was_end = False - was_overlay = False + hunk["memf"] = "" - # V37 fix: in an executable DREL32 is wrongly assigned and actually is a RELOC32SHORT - if hunk_type == HUNK_DREL32 and is_exe: - hunk_type = HUNK_RELOC32SHORT - - hunk = { 'type' : hunk_type, 
'hunk_file_offset' : hunk_file_offset } - self.hunks.append(hunk) - hunk['type_name'] = hunk_names[hunk_type] - self.set_mem_flags(hunk, hunk_flags, 30) - - # account for lib - last_hunk_size = hunk_file_offset - last_file_offset - if lib_size > 0: - lib_size -= last_hunk_size - if lib_size > 0: - hunk['in_lib'] = True - - # ----- HUNK_HEADER ----- - if hunk_type == HUNK_HEADER: - result = self.parse_header(f,hunk) - # ----- HUNK_CODE/HUNK_DATA ------ - elif hunk_type == HUNK_CODE or hunk_type == HUNK_DATA or hunk_type == HUNK_PPC_CODE: - result = self.parse_code_or_data(f,hunk) - # ---- HUNK_BSS ---- - elif hunk_type == HUNK_BSS: - result = self.parse_bss(f,hunk) - # ----- HUNK_ ----- - elif hunk_type == HUNK_RELRELOC32 or hunk_type == HUNK_ABSRELOC16 \ - or hunk_type == HUNK_RELRELOC8 or hunk_type == HUNK_RELRELOC16 or hunk_type == HUNK_ABSRELOC32 \ - or hunk_type == HUNK_DREL32 or hunk_type == HUNK_DREL16 or hunk_type == HUNK_DREL8 \ - or hunk_type == HUNK_RELRELOC26: - result = self.parse_reloc(f,hunk) - # ---- HUNK_ ----- - elif hunk_type == HUNK_RELOC32SHORT: - result = self.parse_reloc_short(f,hunk) - # ----- HUNK_SYMBOL ----- - elif hunk_type == HUNK_SYMBOL: - result = self.parse_symbol(f,hunk) - # ----- HUNK_DEBUG ----- - elif hunk_type == HUNK_DEBUG: - result = self.parse_debug(f,hunk) - # ----- HUNK_END ----- - elif hunk_type == HUNK_END: - was_end = True - result = RESULT_OK - # ----- HUNK_OVERLAY ----- - elif hunk_type == HUNK_OVERLAY: - result = self.parse_overlay(f,hunk) - was_overlay = True - # ----- HUNK_BREAK ----- - elif hunk_type == HUNK_BREAK: - result = RESULT_OK - # ----- HUNK_LIB ----- - elif hunk_type == HUNK_LIB: - result,lib_size = self.parse_lib(f,hunk) - lib_size += 8 # add size of HUNK_LIB itself - # ----- HUNK_INDEX ----- - elif hunk_type == HUNK_INDEX: - result = self.parse_index(f,hunk) - # ----- HUNK_EXT ----- - elif hunk_type == HUNK_EXT: - result = self.parse_ext(f,hunk) - # ----- HUNK_UNIT ----- - elif hunk_type == HUNK_UNIT or 
hunk_type == HUNK_NAME: - result = self.parse_unit_or_name(f,hunk) - # ----- oops! unsupported hunk ----- - else: - self.error_string = "unsupported hunk %d" % (hunk_type) - return RESULT_UNSUPPORTED_HUNKS + # ----- public functions ----- + + """Read a hunk file and build internal hunk structure + Return status and set self.error_string on failure + """ - # a parse error occurred - if result != RESULT_OK: - return result + def read_file(self, hfile): + with open(hfile, "rb") as f: + return self.read_file_obj(hfile, f) - last_file_offset = hunk_file_offset - return RESULT_OK + """Read a hunk from memory""" - """Return a list with all the hunk type names that were found + def read_mem(self, name, data): + fobj = io.StringIO(data) + return self.read_file_obj(name, fobj) + + def read_file_obj(self, hfile, f): + self.hunks = [] + is_first_hunk = True + is_exe = False + was_end = False + was_overlay = False + self.error_string = None + lib_size = 0 + last_file_offset = 0 + + while True: + hunk_file_offset = f.tell() + + # read hunk type + hunk_raw_type = self.read_long(f) + if hunk_raw_type == -1 or hunk_raw_type == -2: # tolerate extra byte at end + if is_first_hunk: + self.error_string = "No valid hunk file: '%s' is empty" % (hfile) + return RESULT_NO_HUNK_FILE + else: + # eof + break + elif hunk_raw_type < 0: + if is_first_hunk: + self.error_string = "No valid hunk file: '%s' is too short" % ( + hfile + ) + return RESULT_NO_HUNK_FILE + else: + self.error_string = "Error reading hunk type @%08x" % (f.tell()) + return RESULT_INVALID_HUNK_FILE + + hunk_type = hunk_raw_type & HUNK_TYPE_MASK + hunk_flags = hunk_raw_type & HUNK_FLAGS_MASK + + # check range of hunk type + if hunk_type not in hunk_names: + # no hunk file? 
+ if is_first_hunk: + self.error_string = "No hunk file: '%s' type was %d" % ( + hfile, + hunk_type, + ) + return RESULT_NO_HUNK_FILE + elif was_end: + # garbage after an end tag is ignored + return RESULT_OK + elif was_overlay: + # seems to be a custom overlay -> read to end of file + ov_custom_data = f.read() + self.hunks[-1]["custom_data"] = ov_custom_data + return RESULT_OK + else: + self.error_string = "Invalid hunk type %d/%x found at @%08x" % ( + hunk_type, + hunk_type, + f.tell(), + ) + return RESULT_INVALID_HUNK_FILE + else: + # check for valid first hunk type + if is_first_hunk: + if not self.is_valid_first_hunk_type(hunk_type): + self.error_string = ( + "No hunk file: '%s' first hunk type was %d" + % (hfile, hunk_type) + ) + return RESULT_NO_HUNK_FILE + else: + is_exe = hunk_type == HUNK_HEADER + + is_first_hunk = False + was_end = False + was_overlay = False + + # V37 fix: in an executable DREL32 is wrongly assigned and actually is a RELOC32SHORT + if hunk_type == HUNK_DREL32 and is_exe: + hunk_type = HUNK_RELOC32SHORT + + hunk = {"type": hunk_type, "hunk_file_offset": hunk_file_offset} + self.hunks.append(hunk) + hunk["type_name"] = hunk_names[hunk_type] + self.set_mem_flags(hunk, hunk_flags, 30) + + # account for lib + last_hunk_size = hunk_file_offset - last_file_offset + if lib_size > 0: + lib_size -= last_hunk_size + if lib_size > 0: + hunk["in_lib"] = True + + # ----- HUNK_HEADER ----- + if hunk_type == HUNK_HEADER: + result = self.parse_header(f, hunk) + # ----- HUNK_CODE/HUNK_DATA ------ + elif ( + hunk_type == HUNK_CODE + or hunk_type == HUNK_DATA + or hunk_type == HUNK_PPC_CODE + ): + result = self.parse_code_or_data(f, hunk) + # ---- HUNK_BSS ---- + elif hunk_type == HUNK_BSS: + result = self.parse_bss(f, hunk) + # ----- HUNK_ ----- + elif ( + hunk_type == HUNK_RELRELOC32 + or hunk_type == HUNK_ABSRELOC16 + or hunk_type == HUNK_RELRELOC8 + or hunk_type == HUNK_RELRELOC16 + or hunk_type == HUNK_ABSRELOC32 + or hunk_type == HUNK_DREL32 + or 
hunk_type == HUNK_DREL16 + or hunk_type == HUNK_DREL8 + or hunk_type == HUNK_RELRELOC26 + ): + result = self.parse_reloc(f, hunk) + # ---- HUNK_ ----- + elif hunk_type == HUNK_RELOC32SHORT: + result = self.parse_reloc_short(f, hunk) + # ----- HUNK_SYMBOL ----- + elif hunk_type == HUNK_SYMBOL: + result = self.parse_symbol(f, hunk) + # ----- HUNK_DEBUG ----- + elif hunk_type == HUNK_DEBUG: + result = self.parse_debug(f, hunk) + # ----- HUNK_END ----- + elif hunk_type == HUNK_END: + was_end = True + result = RESULT_OK + # ----- HUNK_OVERLAY ----- + elif hunk_type == HUNK_OVERLAY: + result = self.parse_overlay(f, hunk) + was_overlay = True + # ----- HUNK_BREAK ----- + elif hunk_type == HUNK_BREAK: + result = RESULT_OK + # ----- HUNK_LIB ----- + elif hunk_type == HUNK_LIB: + result, lib_size = self.parse_lib(f, hunk) + lib_size += 8 # add size of HUNK_LIB itself + # ----- HUNK_INDEX ----- + elif hunk_type == HUNK_INDEX: + result = self.parse_index(f, hunk) + # ----- HUNK_EXT ----- + elif hunk_type == HUNK_EXT: + result = self.parse_ext(f, hunk) + # ----- HUNK_UNIT ----- + elif hunk_type == HUNK_UNIT or hunk_type == HUNK_NAME: + result = self.parse_unit_or_name(f, hunk) + # ----- oops! 
unsupported hunk ----- + else: + self.error_string = "unsupported hunk %d" % (hunk_type) + return RESULT_UNSUPPORTED_HUNKS + + # a parse error occurred + if result != RESULT_OK: + return result + + last_file_offset = hunk_file_offset + return RESULT_OK + + """Return a list with all the hunk type names that were found """ - def get_hunk_summary(self): - return self.get_struct_summary(self.hunks) - - # ---------- Build Segments from Hunks ---------- - - def build_loadseg(self): - in_header = True - seek_begin = False - segment = None - segment_list = self.segments - for e in self.hunks: - hunk_type = e['type'] - - # check for end of header - if in_header and hunk_type in loadseg_valid_begin_hunks: - in_header = False - seek_begin = True - - if in_header: - if hunk_type == HUNK_HEADER: - # we are in an overlay! - if self.overlay != None: - segment_list = [] - self.overlay_segments.append(segment_list) - self.overlay_headers.append(e) - else: - # set load_seg() header - self.header = e - - # start a new segment - segment = [] - - # setup hunk counter - hunk_no = e['first_hunk'] - - # we allow a debug hunk in header for SAS compatibility - elif hunk_type == HUNK_DEBUG: - segment.append(e) - else: - self.error_string = "Expected header in loadseg: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) - return False - - elif seek_begin: - # a new hunk shall begin - if hunk_type in loadseg_valid_begin_hunks: - segment = [e] - segment_list.append(segment) - seek_header = False - seek_begin = False - e['hunk_no'] = hunk_no - e['alloc_size'] = self.header['hunks'][hunk_no]['size'] - hunk_no += 1 - # add an extra overlay "hunk" - elif hunk_type == HUNK_OVERLAY: - # assume hunk to be empty - if self.overlay != None: - self.error_string = "Multiple overlay in loadseg: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) + + def get_hunk_summary(self): + return self.get_struct_summary(self.hunks) + + # ---------- Build Segments from Hunks ---------- + + def build_loadseg(self): + 
in_header = True + seek_begin = False + segment = None + segment_list = self.segments + for e in self.hunks: + hunk_type = e["type"] + + # check for end of header + if in_header and hunk_type in loadseg_valid_begin_hunks: + in_header = False + seek_begin = True + + if in_header: + if hunk_type == HUNK_HEADER: + # we are in an overlay! + if self.overlay != None: + segment_list = [] + self.overlay_segments.append(segment_list) + self.overlay_headers.append(e) + else: + # set load_seg() header + self.header = e + + # start a new segment + segment = [] + + # setup hunk counter + hunk_no = e["first_hunk"] + + # we allow a debug hunk in header for SAS compatibility + elif hunk_type == HUNK_DEBUG: + segment.append(e) + else: + self.error_string = "Expected header in loadseg: %s %d/%x" % ( + e["type_name"], + hunk_type, + hunk_type, + ) + return False + + elif seek_begin: + # a new hunk shall begin + if hunk_type in loadseg_valid_begin_hunks: + segment = [e] + segment_list.append(segment) + seek_header = False + seek_begin = False + e["hunk_no"] = hunk_no + e["alloc_size"] = self.header["hunks"][hunk_no]["size"] + hunk_no += 1 + # add an extra overlay "hunk" + elif hunk_type == HUNK_OVERLAY: + # assume hunk to be empty + if self.overlay != None: + self.error_string = "Multiple overlay in loadseg: %s %d/%x" % ( + e["type_name"], + hunk_type, + hunk_type, + ) + return False + self.overlay = e + self.overlay_headers = [] + self.overlay_segments = [] + in_header = True + # break + elif hunk_type == HUNK_BREAK: + # assume hunk to be empty + in_header = True + # broken hunk: multiple END or other hunks + elif hunk_type in [HUNK_END, HUNK_NAME, HUNK_DEBUG]: + pass + else: + self.error_string = "Expected hunk start in loadseg: %s %d/%x" % ( + e["type_name"], + hunk_type, + hunk_type, + ) + return False + + else: + # an extra block in hunk or end is expected + if hunk_type == HUNK_END: + seek_begin = True + # contents of hunk + elif hunk_type in loadseg_valid_extra_hunks or 
hunk_type == HUNK_DREL32: + segment.append(e) + # broken hunk file without END tag + elif hunk_type in loadseg_valid_begin_hunks: + segment = [e] + segment_list.append(segment) + seek_header = False + seek_begin = False + e["hunk_no"] = hunk_no + e["alloc_size"] = self.header["hunks"][hunk_no]["size"] + hunk_no += 1 + # unecpected hunk?! + else: + self.error_string = "Unexpected hunk extra in loadseg: %s %d/%x" % ( + e["type_name"], + hunk_type, + hunk_type, + ) + return False + return True + + def build_unit(self): + force_unit = True + in_hunk = False + name = None + segment = None + unit = None + self.units = [] + unit_no = 0 + for e in self.hunks: + hunk_type = e["type"] + + # optional unit as first entry + if hunk_type == HUNK_UNIT: + unit = {} + unit["name"] = e["name"] + unit["unit_no"] = unit_no + unit["segments"] = [] + unit["unit"] = e + unit_no += 1 + self.units.append(unit) + force_unit = False + hunk_no = 0 + elif force_unit: + self.error_string = "Expected name hunk in unit: %s %d/%x" % ( + e["type_name"], + hunk_type, + hunk_type, + ) + return False + elif not in_hunk: + # begin a named hunk + if hunk_type == HUNK_NAME: + name = e["name"] + # main hunk block + elif hunk_type in unit_valid_main_hunks: + segment = [e] + unit["segments"].append(segment) + # give main block the NAME + if name != None: + e["name"] = name + name = None + e["hunk_no"] = hunk_no + hunk_no += 1 + in_hunk = True + # broken hunk: ignore multi ENDs + elif hunk_type == HUNK_END: + pass + else: + self.error_string = "Expected main hunk in unit: %s %d/%x" % ( + e["type_name"], + hunk_type, + hunk_type, + ) + return False + else: + # a hunk is finished + if hunk_type == HUNK_END: + in_hunk = False + # contents of hunk + elif hunk_type in unit_valid_extra_hunks: + segment.append(e) + # unecpected hunk?! 
+ else: + self.error_string = "Unexpected hunk in unit: %s %d/%x" % ( + e["type_name"], + hunk_type, + hunk_type, + ) + return False + + return True + + def build_lib(self): + self.libs = [] + lib_segments = [] + seek_lib = True + seek_main = False + for e in self.hunks: + hunk_type = e["type"] + + # seeking for a LIB hunk + if seek_lib: + if hunk_type == HUNK_LIB: + segment_list = [] + lib_segments.append(segment_list) + seek_lib = False + seek_main = True + hunk_no = 0 + + # get start address of lib hunk in file + lib_file_offset = e["lib_file_offset"] + else: + self.error_string = "Expected lib hunk in lib: %s %d/%x" % ( + e["type_name"], + hunk_type, + hunk_type, + ) + return False + elif seek_main: + # end of lib? -> index! + if hunk_type == HUNK_INDEX: + seek_main = False + seek_lib = True + lib_units = [] + if not self.resolve_index_hunks(e, segment_list, lib_units): + self.error_string = "Error resolving index hunks!" + return False + lib = {} + lib["units"] = lib_units + lib["lib_no"] = len(self.libs) + lib["index"] = e + self.libs.append(lib) + # start of a hunk + elif hunk_type in unit_valid_main_hunks: + segment = [e] + e["hunk_no"] = hunk_no + hunk_no += 1 + segment_list.append(segment) + seek_main = False + + # calc relative lib address + hunk_lib_offset = e["hunk_file_offset"] - lib_file_offset + e["hunk_lib_offset"] = hunk_lib_offset + else: + self.error_string = "Expected main hunk in lib: %s %d/%x" % ( + e["type_name"], + hunk_type, + hunk_type, + ) + return False + else: + # end hunk + if hunk_type == HUNK_END: + seek_main = True + # extra contents + elif hunk_type in unit_valid_extra_hunks: + segment.append(e) + else: + self.error_string = "Unexpected hunk in lib: %s %d/%x" % ( + e["type_name"], + hunk_type, + hunk_type, + ) + return False + + return True + + """Resolve hunks referenced in the index""" + + def resolve_index_hunks(self, index, segment_list, lib_units): + units = index["units"] + no = 0 + for unit in units: + lib_unit = {} + 
unit_segments = [] + lib_unit["segments"] = unit_segments + lib_unit["name"] = unit["name"] + lib_unit["unit_no"] = no + lib_unit["index_unit"] = unit + lib_units.append(lib_unit) + no += 1 + + # try to find segment with start offset + hunk_offset = unit["hunk_begin_offset"] + found = False + for segment in segment_list: + hunk_no = segment[0]["hunk_no"] + lib_off = segment[0]["hunk_lib_offset"] // 4 # is in longwords + if lib_off == hunk_offset: + # found segment + num_segs = len(unit["hunk_infos"]) + for i in range(num_segs): + info = unit["hunk_infos"][i] + seg = segment_list[hunk_no + i] + unit_segments.append(seg) + # renumber hunk + seg[0]["hunk_no"] = i + seg[0]["name"] = info["name"] + seg[0]["index_hunk"] = info + found = True + + if not found: + return False + return True + + """From the hunk list build a set of segments that form the actual binary""" + + def build_segments(self): + self.segments = [] + if len(self.hunks) == 0: + self.type = TYPE_UNKNOWN return False - self.overlay = e - self.overlay_headers = [] - self.overlay_segments = [] - in_header = True - # break - elif hunk_type == HUNK_BREAK: - # assume hunk to be empty - in_header = True - # broken hunk: multiple END or other hunks - elif hunk_type in [HUNK_END, HUNK_NAME, HUNK_DEBUG]: - pass - else: - self.error_string = "Expected hunk start in loadseg: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) - return False - - else: - # an extra block in hunk or end is expected - if hunk_type == HUNK_END: - seek_begin = True - # contents of hunk - elif hunk_type in loadseg_valid_extra_hunks or hunk_type == HUNK_DREL32: - segment.append(e) - # broken hunk file without END tag - elif hunk_type in loadseg_valid_begin_hunks: - segment = [e] - segment_list.append(segment) - seek_header = False - seek_begin = False - e['hunk_no'] = hunk_no - e['alloc_size'] = self.header['hunks'][hunk_no]['size'] - hunk_no += 1 - # unecpected hunk?! 
- else: - self.error_string = "Unexpected hunk extra in loadseg: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) - return False - return True - - def build_unit(self): - force_unit = True - in_hunk = False - name = None - segment = None - unit = None - self.units = [] - unit_no = 0 - for e in self.hunks: - hunk_type = e['type'] - - # optional unit as first entry - if hunk_type == HUNK_UNIT: - unit = {} - unit['name'] = e['name'] - unit['unit_no'] = unit_no - unit['segments'] = [] - unit['unit'] = e - unit_no += 1 - self.units.append(unit) - force_unit = False - hunk_no = 0 - elif force_unit: - self.error_string = "Expected name hunk in unit: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) - return False - elif not in_hunk: - # begin a named hunk - if hunk_type == HUNK_NAME: - name = e['name'] - # main hunk block - elif hunk_type in unit_valid_main_hunks: - segment = [e] - unit['segments'].append(segment) - # give main block the NAME - if name != None: - e['name'] = name - name = None - e['hunk_no'] = hunk_no - hunk_no += 1 - in_hunk = True - # broken hunk: ignore multi ENDs - elif hunk_type == HUNK_END: - pass - else: - self.error_string = "Expected main hunk in unit: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) - return False - else: - # a hunk is finished - if hunk_type == HUNK_END: - in_hunk = False - # contents of hunk - elif hunk_type in unit_valid_extra_hunks: - segment.append(e) - # unecpected hunk?! 
- else: - self.error_string = "Unexpected hunk in unit: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) - return False - - return True - - def build_lib(self): - self.libs = [] - lib_segments = [] - seek_lib = True - seek_main = False - for e in self.hunks: - hunk_type = e['type'] - - # seeking for a LIB hunk - if seek_lib: - if hunk_type == HUNK_LIB: - segment_list = [] - lib_segments.append(segment_list) - seek_lib = False - seek_main = True - hunk_no = 0 - - # get start address of lib hunk in file - lib_file_offset = e['lib_file_offset'] + + # determine type of file from first hunk + first_hunk_type = self.hunks[0]["type"] + if first_hunk_type == HUNK_HEADER: + self.type = TYPE_LOADSEG + return self.build_loadseg() + elif first_hunk_type == HUNK_UNIT: + self.type = TYPE_UNIT + return self.build_unit() + elif first_hunk_type == HUNK_LIB: + self.type = TYPE_LIB + return self.build_lib() else: - self.error_string = "Expected lib hunk in lib: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) - return False - elif seek_main: - # end of lib? -> index! - if hunk_type == HUNK_INDEX: - seek_main = False - seek_lib = True - lib_units = [] - if not self.resolve_index_hunks(e, segment_list, lib_units): - self.error_string = "Error resolving index hunks!" 
+ self.type = TYPE_UNKNOWN return False - lib = {} - lib['units'] = lib_units - lib['lib_no'] = len(self.libs) - lib['index'] = e - self.libs.append(lib) - # start of a hunk - elif hunk_type in unit_valid_main_hunks: - segment = [e] - e['hunk_no'] = hunk_no - hunk_no += 1 - segment_list.append(segment) - seek_main = False - - # calc relative lib address - hunk_lib_offset = e['hunk_file_offset'] - lib_file_offset - e['hunk_lib_offset'] = hunk_lib_offset + + """Return a summary of the created segment structure""" + + def get_segment_summary(self): + return self.get_struct_summary(self.segments) + + def get_overlay_segment_summary(self): + if self.overlay_segments != None: + return self.get_struct_summary(self.overlay_segments) else: - self.error_string = "Expected main hunk in lib: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) - return False - else: - # end hunk - if hunk_type == HUNK_END: - seek_main = True - # extra contents - elif hunk_type in unit_valid_extra_hunks: - segment.append(e) + return None + + def get_libs_summary(self): + if self.libs != None: + return self.get_struct_summary(self.libs) + else: + return None + + def get_units_summary(self): + if self.units != None: + return self.get_struct_summary(self.units) else: - self.error_string = "Unexpected hunk in lib: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) - return False - - return True - - """Resolve hunks referenced in the index""" - def resolve_index_hunks(self, index, segment_list, lib_units): - units = index['units'] - no = 0 - for unit in units: - lib_unit = {} - unit_segments = [] - lib_unit['segments'] = unit_segments - lib_unit['name'] = unit['name'] - lib_unit['unit_no'] = no - lib_unit['index_unit'] = unit - lib_units.append(lib_unit) - no += 1 - - # try to find segment with start offset - hunk_offset = unit['hunk_begin_offset'] - found = False - for segment in segment_list: - hunk_no = segment[0]['hunk_no'] - lib_off = segment[0]['hunk_lib_offset'] // 4 # is in longwords - if 
lib_off == hunk_offset: - # found segment - num_segs = len(unit['hunk_infos']) - for i in range(num_segs): - info = unit['hunk_infos'][i] - seg = segment_list[hunk_no+i] - unit_segments.append(seg) - # renumber hunk - seg[0]['hunk_no'] = i - seg[0]['name'] = info['name'] - seg[0]['index_hunk'] = info - found = True - - if not found: - return False - return True - - """From the hunk list build a set of segments that form the actual binary""" - def build_segments(self): - self.segments = [] - if len(self.hunks) == 0: - self.type = TYPE_UNKNOWN - return False - - # determine type of file from first hunk - first_hunk_type = self.hunks[0]['type'] - if first_hunk_type == HUNK_HEADER: - self.type = TYPE_LOADSEG - return self.build_loadseg() - elif first_hunk_type == HUNK_UNIT: - self.type = TYPE_UNIT - return self.build_unit() - elif first_hunk_type == HUNK_LIB: - self.type = TYPE_LIB - return self.build_lib() - else: - self.type = TYPE_UNKNOWN - return False - - """Return a summary of the created segment structure""" - def get_segment_summary(self): - return self.get_struct_summary(self.segments) - - def get_overlay_segment_summary(self): - if self.overlay_segments != None: - return self.get_struct_summary(self.overlay_segments) - else: - return None - - def get_libs_summary(self): - if self.libs != None: - return self.get_struct_summary(self.libs) - else: - return None - - def get_units_summary(self): - if self.units != None: - return self.get_struct_summary(self.units) - else: - return None + return None diff --git a/amitools/binfmt/hunk/HunkRelocate.py b/amitools/binfmt/hunk/HunkRelocate.py index 327921cf..634b83a0 100644 --- a/amitools/binfmt/hunk/HunkRelocate.py +++ b/amitools/binfmt/hunk/HunkRelocate.py @@ -2,89 +2,95 @@ import struct from . 
import Hunk + class HunkRelocate: - - def __init__(self, hunk_file, verbose=False): - self.hunk_file = hunk_file - self.verbose = verbose - - def get_sizes(self): - sizes = [] - for segment in self.hunk_file.segments: - main_hunk = segment[0] - size = main_hunk['alloc_size'] - sizes.append(size) - return sizes + def __init__(self, hunk_file, verbose=False): + self.hunk_file = hunk_file + self.verbose = verbose + + def get_sizes(self): + sizes = [] + for segment in self.hunk_file.segments: + main_hunk = segment[0] + size = main_hunk["alloc_size"] + sizes.append(size) + return sizes + + def get_total_size(self): + sizes = self.get_sizes() + total = 0 + for s in sizes: + total += s + return total + + def get_type_names(self): + names = [] + for segment in self.hunk_file.segments: + main_hunk = segment[0] + name = main_hunk["type_name"] + names.append(name) + return names + + # generate a sequence of addresses suitable for relocation + # in a single block + def get_seq_addrs(self, base_addr, padding=0): + sizes = self.get_sizes() + addrs = [] + addr = base_addr + for s in sizes: + addrs.append(addr) + addr += s + padding + return addrs + + def relocate(self, addr): + datas = [] + for segment in self.hunk_file.segments: + main_hunk = segment[0] + hunk_no = main_hunk["hunk_no"] + alloc_size = main_hunk["alloc_size"] + size = main_hunk["size"] + data = ctypes.create_string_buffer(alloc_size) + + # fill in segment data + if "data" in main_hunk: + data.value = main_hunk["data"] + + if self.verbose: + print("#%02d @ %06x" % (hunk_no, addr[hunk_no])) + + # find relocation hunks + for hunk in segment[1:]: + # abs reloc 32 or + # HUNK_DREL32 is a buggy V37 HUNK_RELOC32SHORT... 
+ if ( + hunk["type"] == Hunk.HUNK_ABSRELOC32 + or hunk["type"] == Hunk.HUNK_DREL32 + ): + reloc = hunk["reloc"] + for hunk_num in reloc: + # get address of other hunk + hunk_addr = addr[hunk_num] + offsets = reloc[hunk_num] + for offset in offsets: + self.relocate32(hunk_no, data, offset, hunk_addr) - def get_total_size(self): - sizes = self.get_sizes() - total = 0 - for s in sizes: - total += s - return total + datas.append(data.raw) + return datas - def get_type_names(self): - names = [] - for segment in self.hunk_file.segments: - main_hunk = segment[0] - name = main_hunk['type_name'] - names.append(name) - return names + def relocate32(self, hunk_no, data, offset, hunk_addr): + delta = self.read_long(data, offset) + addr = hunk_addr + delta + self.write_long(data, offset, addr) + if self.verbose: + print( + "#%02d + %06x: %06x (delta) + %06x (hunk_addr) -> %06x" + % (hunk_no, offset, delta, hunk_addr, addr) + ) - # generate a sequence of addresses suitable for relocation - # in a single block - def get_seq_addrs(self, base_addr, padding=0): - sizes = self.get_sizes() - addrs = [] - addr = base_addr - for s in sizes: - addrs.append(addr) - addr += s + padding - return addrs - - def relocate(self, addr): - datas = [] - for segment in self.hunk_file.segments: - main_hunk = segment[0] - hunk_no = main_hunk['hunk_no'] - alloc_size = main_hunk['alloc_size'] - size = main_hunk['size'] - data = ctypes.create_string_buffer(alloc_size) - - # fill in segment data - if 'data' in main_hunk: - data.value = main_hunk['data'] - - if self.verbose: - print("#%02d @ %06x" % (hunk_no, addr[hunk_no])) - - # find relocation hunks - for hunk in segment[1:]: - # abs reloc 32 or - # HUNK_DREL32 is a buggy V37 HUNK_RELOC32SHORT... 
- if hunk['type'] == Hunk.HUNK_ABSRELOC32 or hunk['type'] == Hunk.HUNK_DREL32: - reloc = hunk['reloc'] - for hunk_num in reloc: - # get address of other hunk - hunk_addr = addr[hunk_num] - offsets = reloc[hunk_num] - for offset in offsets: - self.relocate32(hunk_no,data,offset,hunk_addr) - - datas.append(data.raw) - return datas + def read_long(self, data, offset): + bytes = data[offset : offset + 4] + return struct.unpack(">i", bytes)[0] - def relocate32(self, hunk_no, data, offset, hunk_addr): - delta = self.read_long(data, offset) - addr = hunk_addr + delta - self.write_long(data, offset, addr) - if self.verbose: - print("#%02d + %06x: %06x (delta) + %06x (hunk_addr) -> %06x" % (hunk_no, offset, delta, hunk_addr, addr)) - - def read_long(self, data, offset): - bytes = data[offset:offset+4] - return struct.unpack(">i",bytes)[0] - - def write_long(self, data, offset, value): - bytes = struct.pack(">i",value) - data[offset:offset+4] = bytes + def write_long(self, data, offset, value): + bytes = struct.pack(">i", value) + data[offset : offset + 4] = bytes diff --git a/amitools/binfmt/hunk/HunkShow.py b/amitools/binfmt/hunk/HunkShow.py index 7d4dd30e..609b5442 100644 --- a/amitools/binfmt/hunk/HunkShow.py +++ b/amitools/binfmt/hunk/HunkShow.py @@ -2,223 +2,260 @@ from . 
import HunkDisassembler from amitools.util.HexDump import * -class HunkShow: - def __init__(self, hunk_file, show_relocs=False, show_debug=False, \ - disassemble=False, disassemble_start=0, hexdump=False, brief=False, \ - cpu='68000'): - self.hunk_file = hunk_file - - # clone file refs - self.header = hunk_file.header - self.segments = hunk_file.segments - self.overlay = hunk_file.overlay - self.overlay_headers = hunk_file.overlay_headers - self.overlay_segments = hunk_file.overlay_segments - self.libs = hunk_file.libs - self.units = hunk_file.units - - self.show_relocs=show_relocs - self.show_debug=show_debug - self.disassemble=disassemble - self.disassemble_start=disassemble_start - self.cpu = cpu - self.hexdump=hexdump - self.brief=brief - - def show_segments(self): - hunk_type = self.hunk_file.type - if hunk_type == Hunk.TYPE_LOADSEG: - self.show_loadseg_segments() - elif hunk_type == Hunk.TYPE_UNIT: - self.show_unit_segments() - elif hunk_type == Hunk.TYPE_LIB: - self.show_lib_segments() - - def show_lib_segments(self): - for lib in self.libs: - print("Library #%d" % lib['lib_no']) - for unit in lib['units']: - self.print_unit(unit['unit_no'], unit['name']) - for segment in unit['segments']: - self.show_segment(segment, unit['segments']) - - def show_unit_segments(self): - for unit in self.units: - self.print_unit(unit['unit_no'], unit['name']) - for segment in unit['segments']: - self.show_segment(segment, unit['segments']) - - def show_loadseg_segments(self): - # header + segments - if not self.brief: - self.print_header(self.header) - for segment in self.segments: - self.show_segment(segment, self.segments) - - # overlay - if self.overlay != None: - print("Overlay") - num_ov = len(self.overlay_headers) - for o in range(num_ov): +class HunkShow: + def __init__( + self, + hunk_file, + show_relocs=False, + show_debug=False, + disassemble=False, + disassemble_start=0, + hexdump=False, + brief=False, + cpu="68000", + ): + self.hunk_file = hunk_file + + # clone 
file refs + self.header = hunk_file.header + self.segments = hunk_file.segments + self.overlay = hunk_file.overlay + self.overlay_headers = hunk_file.overlay_headers + self.overlay_segments = hunk_file.overlay_segments + self.libs = hunk_file.libs + self.units = hunk_file.units + + self.show_relocs = show_relocs + self.show_debug = show_debug + self.disassemble = disassemble + self.disassemble_start = disassemble_start + self.cpu = cpu + self.hexdump = hexdump + self.brief = brief + + def show_segments(self): + hunk_type = self.hunk_file.type + if hunk_type == Hunk.TYPE_LOADSEG: + self.show_loadseg_segments() + elif hunk_type == Hunk.TYPE_UNIT: + self.show_unit_segments() + elif hunk_type == Hunk.TYPE_LIB: + self.show_lib_segments() + + def show_lib_segments(self): + for lib in self.libs: + print("Library #%d" % lib["lib_no"]) + for unit in lib["units"]: + self.print_unit(unit["unit_no"], unit["name"]) + for segment in unit["segments"]: + self.show_segment(segment, unit["segments"]) + + def show_unit_segments(self): + for unit in self.units: + self.print_unit(unit["unit_no"], unit["name"]) + for segment in unit["segments"]: + self.show_segment(segment, unit["segments"]) + + def show_loadseg_segments(self): + # header + segments if not self.brief: - self.print_header(self.overlay_headers[o]) - for segment in self.overlay_segments[o]: - self.show_segment(segment, self.overlay_segments[o]) - - def show_segment(self, hunk, seg_list): - main = hunk[0] - - # unit hunks are named - name = "" - if 'name' in hunk[0]: - name = "'%s'" % main['name'] - - type_name = main['type_name'].replace("HUNK_","") - size = main['size'] - hunk_no = main['hunk_no'] - if 'data_file_offset' in main: - data_file_offset = main['data_file_offset'] - else: - data_file_offset = None - hunk_file_offset = main['hunk_file_offset'] - if 'alloc_size' in main: - alloc_size = main['alloc_size'] - else: - alloc_size = None - - self.print_segment_header(hunk_no, type_name, size, name, data_file_offset, 
hunk_file_offset, alloc_size) - if self.hexdump and 'data' in main: - print_hex(main['data'],indent=8) - - for extra in hunk[1:]: - self.show_extra_hunk(extra) - - # index hunk info is embedded if its in a lib - if 'index_hunk' in main: - self.show_index_info(main['index_hunk']) - - if main['type'] == Hunk.HUNK_CODE and self.disassemble and len(main['data'])>0: - disas = HunkDisassembler.HunkDisassembler(cpu = self.cpu) - print() - disas.show_disassembly(hunk, seg_list, self.disassemble_start) - print() - - def show_index_info(self, info): - # references from index - if 'refs' in info: - self.print_extra("refs","#%d" % len(info['refs'])) - if not self.brief: - for ref in info['refs']: - self.print_symbol(-1,ref['name'],"(%d bits)" % ref['bits']) - # defines from index - if 'defs' in info: - self.print_extra("defs","#%d" % len(info['defs'])) - if not self.brief: - for d in info['defs']: - self.print_symbol(d['value'],d['name'],"(type %d)" % d['type']) - - def show_extra_hunk(self, hunk): - hunk_type = hunk['type'] - if hunk_type in Hunk.reloc_hunks: - type_name = hunk['type_name'].replace("HUNK_","").lower() - self.print_extra("reloc","%s #%d" % (type_name, len(hunk['reloc']))) - if not self.brief: - self.show_reloc_hunk(hunk) - - elif hunk_type == Hunk.HUNK_DEBUG: - self.print_extra("debug","%s offset=%08x" % (hunk['debug_type'], hunk['debug_offset'])) - if not self.brief: - self.show_debug_hunk(hunk) - - elif hunk_type == Hunk.HUNK_SYMBOL: - self.print_extra("symbol","#%d" % (len(hunk['symbols']))) - if not self.brief: - self.show_symbol_hunk(hunk) - - elif hunk_type == Hunk.HUNK_EXT: - self.print_extra("ext","def #%d ref #%d common #%d" % (len(hunk['ext_def']),len(hunk['ext_ref']),len(hunk['ext_common']))) - if not self.brief: - self.show_ext_hunk(hunk) - - else: - self.print_extra("extra","%s" % hunk['type_name']) - - def show_reloc_hunk(self, hunk): - reloc = hunk['reloc'] - for hunk_num in reloc: - offsets = reloc[hunk_num] - if self.show_relocs: - for offset 
in offsets: - self.print_symbol(offset,"Segment #%d" % hunk_num,"") - else: - self.print_extra_sub("To Segment #%d: %4d entries" % (hunk_num, len(offsets))) - - def show_debug_hunk(self, hunk): - debug_type = hunk['debug_type'] - if debug_type == 'LINE': - self.print_extra_sub("line for '%s'" % hunk['src_file']) - if self.show_debug: - for src_off in hunk['src_map']: - addr = src_off[1] - line = src_off[0] - self.print_symbol(addr,"line %d" % line,"") - else: - if self.show_debug: - print_hex(hunk['data'],indent=8) - - def show_symbol_hunk(self, hunk): - for symbol in hunk['symbols']: - self.print_symbol(symbol[1],symbol[0],"") - - def show_ext_hunk(self, hunk): - # definition - for ext in hunk['ext_def']: - tname = ext['type_name'].replace("EXT_","").lower() - self.print_symbol(ext['def'],ext['name'],tname) - # references - for ext in hunk['ext_ref']: - refs = ext['refs'] - tname = ext['type_name'].replace("EXT_","").lower() - for ref in refs: - self.print_symbol(ref,ext['name'],tname) - - # common_base - for ext in hunk['ext_common']: - tname = ext['type_name'].replace("EXT_","").lower() - self.print_symbol(ext['common_size'],ext['name'],tname) - - # ----- printing ----- - - def print_header(self, hdr): - print("\t header (segments: first=%d, last=%d, table size=%d)" % (hdr['first_hunk'], hdr['last_hunk'], hdr['table_size'])) - - def print_extra(self, type_name, info): - print("\t\t%8s %s" % (type_name, info)) - - def print_extra_sub(self, text): - print("\t\t\t%s" % text) - - def print_segment_header(self, hunk_no, type_name, size, name, data_file_offset, hunk_file_offset, alloc_size): - extra = "" - if alloc_size != None: - extra += "alloc size %08x " % alloc_size - extra += "file header @%08x" % hunk_file_offset - if data_file_offset != None: - extra += " data @%08x" % data_file_offset - print("\t#%03d %-5s size %08x %s %s" % (hunk_no, type_name, size, extra, name)) - - def print_symbol(self,addr,name,extra): - if addr == -1: - a = "xxxxxxxx" - else: - a = 
"%08x" % addr - print("\t\t\t%s %-32s %s" % (a,name,extra)) - - def print_unit(self, no, name): - print(" #%03d UNIT %s" % (no, name)) - - - - - + self.print_header(self.header) + for segment in self.segments: + self.show_segment(segment, self.segments) + + # overlay + if self.overlay != None: + print("Overlay") + num_ov = len(self.overlay_headers) + for o in range(num_ov): + if not self.brief: + self.print_header(self.overlay_headers[o]) + for segment in self.overlay_segments[o]: + self.show_segment(segment, self.overlay_segments[o]) + + def show_segment(self, hunk, seg_list): + main = hunk[0] + + # unit hunks are named + name = "" + if "name" in hunk[0]: + name = "'%s'" % main["name"] + + type_name = main["type_name"].replace("HUNK_", "") + size = main["size"] + hunk_no = main["hunk_no"] + if "data_file_offset" in main: + data_file_offset = main["data_file_offset"] + else: + data_file_offset = None + hunk_file_offset = main["hunk_file_offset"] + if "alloc_size" in main: + alloc_size = main["alloc_size"] + else: + alloc_size = None + + self.print_segment_header( + hunk_no, + type_name, + size, + name, + data_file_offset, + hunk_file_offset, + alloc_size, + ) + if self.hexdump and "data" in main: + print_hex(main["data"], indent=8) + + for extra in hunk[1:]: + self.show_extra_hunk(extra) + + # index hunk info is embedded if its in a lib + if "index_hunk" in main: + self.show_index_info(main["index_hunk"]) + + if ( + main["type"] == Hunk.HUNK_CODE + and self.disassemble + and len(main["data"]) > 0 + ): + disas = HunkDisassembler.HunkDisassembler(cpu=self.cpu) + print() + disas.show_disassembly(hunk, seg_list, self.disassemble_start) + print() + + def show_index_info(self, info): + # references from index + if "refs" in info: + self.print_extra("refs", "#%d" % len(info["refs"])) + if not self.brief: + for ref in info["refs"]: + self.print_symbol(-1, ref["name"], "(%d bits)" % ref["bits"]) + # defines from index + if "defs" in info: + self.print_extra("defs", "#%d" % 
len(info["defs"])) + if not self.brief: + for d in info["defs"]: + self.print_symbol(d["value"], d["name"], "(type %d)" % d["type"]) + + def show_extra_hunk(self, hunk): + hunk_type = hunk["type"] + if hunk_type in Hunk.reloc_hunks: + type_name = hunk["type_name"].replace("HUNK_", "").lower() + self.print_extra("reloc", "%s #%d" % (type_name, len(hunk["reloc"]))) + if not self.brief: + self.show_reloc_hunk(hunk) + + elif hunk_type == Hunk.HUNK_DEBUG: + self.print_extra( + "debug", "%s offset=%08x" % (hunk["debug_type"], hunk["debug_offset"]) + ) + if not self.brief: + self.show_debug_hunk(hunk) + + elif hunk_type == Hunk.HUNK_SYMBOL: + self.print_extra("symbol", "#%d" % (len(hunk["symbols"]))) + if not self.brief: + self.show_symbol_hunk(hunk) + + elif hunk_type == Hunk.HUNK_EXT: + self.print_extra( + "ext", + "def #%d ref #%d common #%d" + % (len(hunk["ext_def"]), len(hunk["ext_ref"]), len(hunk["ext_common"])), + ) + if not self.brief: + self.show_ext_hunk(hunk) + + else: + self.print_extra("extra", "%s" % hunk["type_name"]) + + def show_reloc_hunk(self, hunk): + reloc = hunk["reloc"] + for hunk_num in reloc: + offsets = reloc[hunk_num] + if self.show_relocs: + for offset in offsets: + self.print_symbol(offset, "Segment #%d" % hunk_num, "") + else: + self.print_extra_sub( + "To Segment #%d: %4d entries" % (hunk_num, len(offsets)) + ) + + def show_debug_hunk(self, hunk): + debug_type = hunk["debug_type"] + if debug_type == "LINE": + self.print_extra_sub("line for '%s'" % hunk["src_file"]) + if self.show_debug: + for src_off in hunk["src_map"]: + addr = src_off[1] + line = src_off[0] + self.print_symbol(addr, "line %d" % line, "") + else: + if self.show_debug: + print_hex(hunk["data"], indent=8) + + def show_symbol_hunk(self, hunk): + for symbol in hunk["symbols"]: + self.print_symbol(symbol[1], symbol[0], "") + + def show_ext_hunk(self, hunk): + # definition + for ext in hunk["ext_def"]: + tname = ext["type_name"].replace("EXT_", "").lower() + 
self.print_symbol(ext["def"], ext["name"], tname) + # references + for ext in hunk["ext_ref"]: + refs = ext["refs"] + tname = ext["type_name"].replace("EXT_", "").lower() + for ref in refs: + self.print_symbol(ref, ext["name"], tname) + + # common_base + for ext in hunk["ext_common"]: + tname = ext["type_name"].replace("EXT_", "").lower() + self.print_symbol(ext["common_size"], ext["name"], tname) + + # ----- printing ----- + + def print_header(self, hdr): + print( + "\t header (segments: first=%d, last=%d, table size=%d)" + % (hdr["first_hunk"], hdr["last_hunk"], hdr["table_size"]) + ) + + def print_extra(self, type_name, info): + print("\t\t%8s %s" % (type_name, info)) + + def print_extra_sub(self, text): + print("\t\t\t%s" % text) + + def print_segment_header( + self, + hunk_no, + type_name, + size, + name, + data_file_offset, + hunk_file_offset, + alloc_size, + ): + extra = "" + if alloc_size != None: + extra += "alloc size %08x " % alloc_size + extra += "file header @%08x" % hunk_file_offset + if data_file_offset != None: + extra += " data @%08x" % data_file_offset + print( + "\t#%03d %-5s size %08x %s %s" % (hunk_no, type_name, size, extra, name) + ) + + def print_symbol(self, addr, name, extra): + if addr == -1: + a = "xxxxxxxx" + else: + a = "%08x" % addr + print("\t\t\t%s %-32s %s" % (a, name, extra)) + + def print_unit(self, no, name): + print(" #%03d UNIT %s" % (no, name)) diff --git a/amitools/fd/FDFormat.py b/amitools/fd/FDFormat.py index bf6e255b..1c654923 100644 --- a/amitools/fd/FDFormat.py +++ b/amitools/fd/FDFormat.py @@ -4,153 +4,158 @@ from .FuncDef import FuncDef from amitools.util.DataDir import get_data_sub_dir + def get_fd_name(lib_name): - """return the name associated for a given library/device name""" - if lib_name.endswith(".device"): - fd_name = lib_name.replace(".device", "_lib.fd") - elif lib_name.endswith(".library"): - fd_name = lib_name.replace(".library", "_lib.fd") - else: - raise ValueError("can't find fd name for '%s'" % 
lib_name) - return fd_name + """return the name associated for a given library/device name""" + if lib_name.endswith(".device"): + fd_name = lib_name.replace(".device", "_lib.fd") + elif lib_name.endswith(".library"): + fd_name = lib_name.replace(".library", "_lib.fd") + else: + raise ValueError("can't find fd name for '%s'" % lib_name) + return fd_name + def get_base_name(lib_name): - if lib_name.endswith(".device"): - base_name = lib_name.replace(".device", "Base") - elif lib_name.endswith(".library"): - base_name = lib_name.replace(".library", "Base") - else: - raise ValueError("can't find base name for '%s'" % lib_name) - return "_" + base_name[0].upper() + base_name[1:] + if lib_name.endswith(".device"): + base_name = lib_name.replace(".device", "Base") + elif lib_name.endswith(".library"): + base_name = lib_name.replace(".library", "Base") + else: + raise ValueError("can't find base name for '%s'" % lib_name) + return "_" + base_name[0].upper() + base_name[1:] + def is_device(lib_name): - """return true if given name is associated with a device""" - return lib_name.endswith(".device") + """return true if given name is associated with a device""" + return lib_name.endswith(".device") + def read_lib_fd(lib_name, fd_dir=None, add_std_calls=True): - # get default path if none is given - if fd_dir is None: - fd_dir = get_data_sub_dir("fd") - # get fd path - fd_name = get_fd_name(lib_name) - fd_path = os.path.join(fd_dir, fd_name) - if not os.path.isfile(fd_path): - return None - # try to read fd - fd = read_fd(fd_path) - fd.is_device = is_device(lib_name) - if add_std_calls: - fd.add_std_calls() - return fd + # get default path if none is given + if fd_dir is None: + fd_dir = get_data_sub_dir("fd") + # get fd path + fd_name = get_fd_name(lib_name) + fd_path = os.path.join(fd_dir, fd_name) + if not os.path.isfile(fd_path): + return None + # try to read fd + fd = read_fd(fd_path) + fd.is_device = is_device(lib_name) + if add_std_calls: + fd.add_std_calls() + return 
fd + def generate_fd(lib_name, num_calls=0, add_std_calls=True): - base = get_base_name(lib_name) - func_table = FuncTable(base) - func_table.is_device = is_device(lib_name) - offset = func_table.get_num_std_calls() + 1 - bias = offset * 6 - while offset <= num_calls: - n = "FakeFunc_%d" % offset - f = FuncDef(n, bias) - func_table.add_func(f) - offset += 1 - bias += 6 - if add_std_calls: - func_table.add_std_calls() - return func_table + base = get_base_name(lib_name) + func_table = FuncTable(base) + func_table.is_device = is_device(lib_name) + offset = func_table.get_num_std_calls() + 1 + bias = offset * 6 + while offset <= num_calls: + n = "FakeFunc_%d" % offset + f = FuncDef(n, bias) + func_table.add_func(f) + offset += 1 + bias += 6 + if add_std_calls: + func_table.add_std_calls() + return func_table -def read_fd(fname): - func_pat = r"([A-Za-z][_A-Za-z00-9]+)\((.*)\)\((.*)\)" - func_table = None - bias = 0 - private = True - # parse file - f = open(fname, "r") - for line in f: - l = line.strip() - if len(l) > 1 and l[0] != '*': - # a command - if l[0] == '#' and l[1] == '#': - cmdline = l[2:] - cmda = cmdline.split(" ") - cmd = cmda[0] - if cmd == "base": - base = cmda[1] - func_table = FuncTable(base) - elif cmd == "bias": - bias = int(cmda[1]) - elif cmd == "private": - private = True - elif cmd == "public": - private = False - elif cmd == "end": - break - else: - print("Invalid command:",cmda) - return None - # a function - else: - m = re.match(func_pat, l) - if m == None: - raise IOError("Invalid FD Format") - else: - name = m.group(1) - # create a function definition - func_def = FuncDef(name, bias, private) - if func_table != None: - func_table.add_func(func_def) - # check args - args = m.group(2) - regs = m.group(3) - arg = args.replace(',','/').split('/') - reg = regs.replace(',','/').split('/') - if len(arg) != len(reg): - # hack for double reg args found in mathieeedoub* libs - if len(arg) * 2 == len(reg): - arg_hi = [x + "_hi" for x in arg] - 
arg_lo = [x + "_lo" for x in arg] - arg = [x for pair in zip(arg_hi, arg_lo) for x in pair] - else: - raise IOError("Reg and Arg name mismatch in FD File") - if arg[0] != '': - num_args = len(arg) - for i in range(num_args): - func_def.add_arg(arg[i],reg[i]) - bias += 6 - f.close() - return func_table -def write_fd(fname, fd, add_private): - fo = open(fname, "w") - fo.write("##base %s\n" % (fd.get_base_name())) - last_bias = 0 - last_mode = None - funcs = fd.get_funcs() - for f in funcs: - if not f.is_private() or add_private: - # check new mode - if f.is_private(): - new_mode = "private" - else: - new_mode = "public" - if last_mode != new_mode: - fo.write("##%s\n" % new_mode) - last_mode = new_mode - # check new bias - new_bias = f.get_bias() - if last_bias + 6 != new_bias: - fo.write("##bias %d\n" % new_bias) - last_bias = new_bias - # build func - line = f.get_name() - args = f.get_args() - if args == None: - line += "()()" - else: - line += "(" + ",".join([x[0] for x in args]) + ")" - line += "(" + "/".join([x[1] for x in args]) + ")" - fo.write("%s\n" % line) - fo.write("##end\n") - fo.close() +def read_fd(fname): + func_pat = r"([A-Za-z][_A-Za-z00-9]+)\((.*)\)\((.*)\)" + func_table = None + bias = 0 + private = True + # parse file + f = open(fname, "r") + for line in f: + l = line.strip() + if len(l) > 1 and l[0] != "*": + # a command + if l[0] == "#" and l[1] == "#": + cmdline = l[2:] + cmda = cmdline.split(" ") + cmd = cmda[0] + if cmd == "base": + base = cmda[1] + func_table = FuncTable(base) + elif cmd == "bias": + bias = int(cmda[1]) + elif cmd == "private": + private = True + elif cmd == "public": + private = False + elif cmd == "end": + break + else: + print("Invalid command:", cmda) + return None + # a function + else: + m = re.match(func_pat, l) + if m == None: + raise IOError("Invalid FD Format") + else: + name = m.group(1) + # create a function definition + func_def = FuncDef(name, bias, private) + if func_table != None: + 
func_table.add_func(func_def) + # check args + args = m.group(2) + regs = m.group(3) + arg = args.replace(",", "/").split("/") + reg = regs.replace(",", "/").split("/") + if len(arg) != len(reg): + # hack for double reg args found in mathieeedoub* libs + if len(arg) * 2 == len(reg): + arg_hi = [x + "_hi" for x in arg] + arg_lo = [x + "_lo" for x in arg] + arg = [x for pair in zip(arg_hi, arg_lo) for x in pair] + else: + raise IOError("Reg and Arg name mismatch in FD File") + if arg[0] != "": + num_args = len(arg) + for i in range(num_args): + func_def.add_arg(arg[i], reg[i]) + bias += 6 + f.close() + return func_table +def write_fd(fname, fd, add_private): + fo = open(fname, "w") + fo.write("##base %s\n" % (fd.get_base_name())) + last_bias = 0 + last_mode = None + funcs = fd.get_funcs() + for f in funcs: + if not f.is_private() or add_private: + # check new mode + if f.is_private(): + new_mode = "private" + else: + new_mode = "public" + if last_mode != new_mode: + fo.write("##%s\n" % new_mode) + last_mode = new_mode + # check new bias + new_bias = f.get_bias() + if last_bias + 6 != new_bias: + fo.write("##bias %d\n" % new_bias) + last_bias = new_bias + # build func + line = f.get_name() + args = f.get_args() + if args == None: + line += "()()" + else: + line += "(" + ",".join([x[0] for x in args]) + ")" + line += "(" + "/".join([x[1] for x in args]) + ")" + fo.write("%s\n" % line) + fo.write("##end\n") + fo.close() diff --git a/amitools/fd/FuncDef.py b/amitools/fd/FuncDef.py index 7474e5b6..0c3d3a67 100644 --- a/amitools/fd/FuncDef.py +++ b/amitools/fd/FuncDef.py @@ -1,36 +1,48 @@ class FuncDef: - """A function definition""" - def __init__(self, name, bias, private=False, is_std=False): - self.name = name - self.bias = bias - self.index = (bias - 6) // 6 - self.private = private - self.std = is_std - self.args = [] - def __str__(self): - return self.get_str() - def get_name(self): - return self.name - def get_bias(self): - return self.bias - def get_index(self): - 
return self.index - def is_private(self): - return self.private - def is_std(self): - return self.std - def get_args(self): - return self.args - def add_arg(self, name, reg): - self.args.append((name, reg)) - def dump(self): - print((self.name,self.bias,self.private,self.args)) - def get_arg_str(self, with_reg=True): - if len(self.args) == 0: - return "()" - elif with_reg: - return "( " + ", ".join(["%s/%s" % (x[0],x[1]) for x in self.args]) + " )" - else: - return "( " + ", ".join(["%s" % x[0] for x in self.args]) + " )" - def get_str(self, with_reg=True): - return self.name + self.get_arg_str(with_reg) + """A function definition""" + + def __init__(self, name, bias, private=False, is_std=False): + self.name = name + self.bias = bias + self.index = (bias - 6) // 6 + self.private = private + self.std = is_std + self.args = [] + + def __str__(self): + return self.get_str() + + def get_name(self): + return self.name + + def get_bias(self): + return self.bias + + def get_index(self): + return self.index + + def is_private(self): + return self.private + + def is_std(self): + return self.std + + def get_args(self): + return self.args + + def add_arg(self, name, reg): + self.args.append((name, reg)) + + def dump(self): + print((self.name, self.bias, self.private, self.args)) + + def get_arg_str(self, with_reg=True): + if len(self.args) == 0: + return "()" + elif with_reg: + return "( " + ", ".join(["%s/%s" % (x[0], x[1]) for x in self.args]) + " )" + else: + return "( " + ", ".join(["%s" % x[0] for x in self.args]) + " )" + + def get_str(self, with_reg=True): + return self.name + self.get_arg_str(with_reg) diff --git a/amitools/fd/FuncTable.py b/amitools/fd/FuncTable.py index c5db77d5..82b1bac7 100644 --- a/amitools/fd/FuncTable.py +++ b/amitools/fd/FuncTable.py @@ -1,111 +1,113 @@ from .FuncDef import FuncDef + class FuncTable: - """Store a function table""" - def __init__(self, base_name, is_device=False): - self.funcs = [] - self.base_name = base_name - self.bias_map 
= {} - self.name_map = {} - self.index_tab = [] - self.max_bias = 0 - self.is_device = is_device - - def get_base_name(self): - return self.base_name - - def get_funcs(self): - return self.funcs - - def get_func_by_bias(self, bias): - if bias in self.bias_map: - return self.bias_map[bias] - else: - return None - - def get_max_bias(self): - return self.max_bias - - def get_neg_size(self): - return self.max_bias + 6 - - def get_num_indices(self): - return self.max_bias // 6 - - def get_all_func_names(): - return list(self.name_map.keys()) - - def has_func(self, name): - return name in self.name_map - - def get_func_by_name(self, name): - if name in self.name_map: - return self.name_map[name] - else: - return None - - def get_num_funcs(self): - return len(self.funcs) - - def get_index_table(self): - return self.index_tab - - def get_func_by_index(self, idx): - return self.index_tab[idx] - - def add_func(self, f): - # add to list - self.funcs.append(f) - # store by bias - bias = f.get_bias() - if bias in self.bias_map: - raise ValueError("bias %d already added!" 
% bias) - self.bias_map[bias] = f - # store by name - name = f.get_name() - self.name_map[name] = f - # adjust max bias - if bias > self.max_bias: - self.max_bias = bias - # update index table - tab_len = bias // 6 - while len(self.index_tab) < tab_len: - self.index_tab.append(None) - index = tab_len - 1 - self.index_tab[index] = f - - def add_call(self,name,bias,arg,reg,is_std=False): - if len(arg) != len(reg): - raise IOError("Reg and Arg name mismatch in function definition") - else: - func_def = FuncDef(name, bias, False, is_std) - self.add_func(func_def) - if arg and len(arg) > 0: - num_args = len(arg) - for i in range(num_args): - func_def.add_arg(arg[i],reg[i]) - - def dump(self): - print(("FuncTable:",self.base_name)) - for f in self.funcs: - f.dump() - - def get_num_std_calls(self): - if self.is_device: - return 6 - else: - return 4 - - def add_std_calls(self): - if self.is_device: - self.add_call("_OpenDev",6,["IORequest","Unit"],["a1","d0"],True) - self.add_call("_CloseDev",12,["IORequest"],["a1"],True) - self.add_call("_ExpungeDev",18,["MyDev"],["a6"],True) - self.add_call("_Empty",24,[],[],True) - self.add_call("BeginIO",30,["IORequest"],["a1"],True) - self.add_call("AbortIO",36,["IORequest"],["a1"],True) - else: - self.add_call("_OpenLib",6,["MyLib"],["a6"],True) - self.add_call("_CloseLib",12,["MyLib"],["a6"],True) - self.add_call("_ExpungeLib",18,["MyLib"],["a6"],True) - self.add_call("_Empty",24,[],[],True) + """Store a function table""" + + def __init__(self, base_name, is_device=False): + self.funcs = [] + self.base_name = base_name + self.bias_map = {} + self.name_map = {} + self.index_tab = [] + self.max_bias = 0 + self.is_device = is_device + + def get_base_name(self): + return self.base_name + + def get_funcs(self): + return self.funcs + + def get_func_by_bias(self, bias): + if bias in self.bias_map: + return self.bias_map[bias] + else: + return None + + def get_max_bias(self): + return self.max_bias + + def get_neg_size(self): + return 
self.max_bias + 6 + + def get_num_indices(self): + return self.max_bias // 6 + + def get_all_func_names(): + return list(self.name_map.keys()) + + def has_func(self, name): + return name in self.name_map + + def get_func_by_name(self, name): + if name in self.name_map: + return self.name_map[name] + else: + return None + + def get_num_funcs(self): + return len(self.funcs) + + def get_index_table(self): + return self.index_tab + + def get_func_by_index(self, idx): + return self.index_tab[idx] + + def add_func(self, f): + # add to list + self.funcs.append(f) + # store by bias + bias = f.get_bias() + if bias in self.bias_map: + raise ValueError("bias %d already added!" % bias) + self.bias_map[bias] = f + # store by name + name = f.get_name() + self.name_map[name] = f + # adjust max bias + if bias > self.max_bias: + self.max_bias = bias + # update index table + tab_len = bias // 6 + while len(self.index_tab) < tab_len: + self.index_tab.append(None) + index = tab_len - 1 + self.index_tab[index] = f + + def add_call(self, name, bias, arg, reg, is_std=False): + if len(arg) != len(reg): + raise IOError("Reg and Arg name mismatch in function definition") + else: + func_def = FuncDef(name, bias, False, is_std) + self.add_func(func_def) + if arg and len(arg) > 0: + num_args = len(arg) + for i in range(num_args): + func_def.add_arg(arg[i], reg[i]) + + def dump(self): + print(("FuncTable:", self.base_name)) + for f in self.funcs: + f.dump() + + def get_num_std_calls(self): + if self.is_device: + return 6 + else: + return 4 + + def add_std_calls(self): + if self.is_device: + self.add_call("_OpenDev", 6, ["IORequest", "Unit"], ["a1", "d0"], True) + self.add_call("_CloseDev", 12, ["IORequest"], ["a1"], True) + self.add_call("_ExpungeDev", 18, ["MyDev"], ["a6"], True) + self.add_call("_Empty", 24, [], [], True) + self.add_call("BeginIO", 30, ["IORequest"], ["a1"], True) + self.add_call("AbortIO", 36, ["IORequest"], ["a1"], True) + else: + self.add_call("_OpenLib", 6, ["MyLib"], 
["a6"], True) + self.add_call("_CloseLib", 12, ["MyLib"], ["a6"], True) + self.add_call("_ExpungeLib", 18, ["MyLib"], ["a6"], True) + self.add_call("_Empty", 24, [], [], True) diff --git a/amitools/fs/ADFSBitmap.py b/amitools/fs/ADFSBitmap.py index 49a12906..03759ae4 100644 --- a/amitools/fs/ADFSBitmap.py +++ b/amitools/fs/ADFSBitmap.py @@ -1,6 +1,3 @@ - - - import struct import ctypes @@ -9,343 +6,356 @@ from .DosType import * from .FSError import * + class ADFSBitmap: - def __init__(self, root_blk): - self.root_blk = root_blk - self.blkdev = self.root_blk.blkdev - # state - self.ext_blks = [] - self.bitmap_blks = [] - self.bitmap_data = None - self.valid = False - # bitmap block entries - self.bitmap_blk_bytes = root_blk.blkdev.block_bytes - 4 - self.bitmap_blk_longs = root_blk.blkdev.block_longs - 1 - # calc size of bitmap - self.bitmap_bits = self.blkdev.num_blocks - self.blkdev.reserved - self.bitmap_longs = (self.bitmap_bits + 31) // 32 - self.bitmap_bytes = (self.bitmap_bits + 7) // 8 - # number of blocks required for bitmap (and bytes consumed there) - self.bitmap_num_blks = (self.bitmap_longs + self.bitmap_blk_longs - 1) // self.bitmap_blk_longs - self.bitmap_all_blk_bytes = self.bitmap_num_blks * self.bitmap_blk_bytes - # blocks stored in root and in every ext block - self.num_blks_in_root = len(self.root_blk.bitmap_ptrs) - self.num_blks_in_ext = self.blkdev.block_longs - 1 - # number of ext blocks required - self.num_ext = (self.bitmap_num_blks - self.num_blks_in_root + self.num_blks_in_ext - 1) // (self.num_blks_in_ext) - # start a root block - self.find_start = root_blk.blk_num - # was bitmap modified? 
- self.dirty = False - # for DOS6/7 track used blocks - self.num_used = 0 - - def create(self): - # clear local count - self.num_used = 0 - - # create data and preset with 0xff - self.bitmap_data = ctypes.create_string_buffer(self.bitmap_all_blk_bytes) - for i in range(self.bitmap_all_blk_bytes): - self.bitmap_data[i] = 0xff - - # clear bit for root block - blk_pos = self.root_blk.blk_num - self.clr_bit(blk_pos) - blk_pos += 1 - - # create ext blocks - for i in range(self.num_ext): - bm_ext = BitmapExtBlock(self.blkdev, blk_pos) - bm_ext.create() - self.clr_bit(blk_pos) - blk_pos += 1 - self.ext_blks.append(bm_ext) - - # create bitmap blocks - for i in range(self.bitmap_num_blks): - bm = BitmapBlock(self.blkdev, blk_pos) - bm.create() - self.clr_bit(blk_pos) - blk_pos += 1 - self.bitmap_blks.append(bm) - - # set pointers to ext blocks - if self.num_ext > 0: - self.root_blk.bitmap_ext_blk = self.ext_blks[0].blk_num - for i in range(self.num_ext-1): - bm_ext = self.ext_blks[i] - bm_ext_next = self.ext_blks[i+1] - bm_ext.bitmap_ext_blk = bm_ext_next.blk_num - - # set pointers to bitmap blocks - cur_ext_index = 0 - cur_ext_pos = 0 - for i in range(self.bitmap_num_blks): - blk_num = self.bitmap_blks[i].blk_num - if i < self.num_blks_in_root: - # pointers in root block - self.root_blk.bitmap_ptrs[i] = blk_num - else: - # pointers in ext block - self.ext_blks[cur_ext_index].bitmap_ptrs[cur_ext_pos] = blk_num - cur_ext_pos += 1 - if cur_ext_pos == self.num_blks_in_ext: - cur_ext_pos = 0 - cur_ext_index += 1 - self.valid = True - self.dirty = True - - def write(self): - if self.dirty: - self.dirty = False - # update bitmap - self._write_ext_blks() - self._write_bitmap_blks() - # in DOS6/DOS7 update root block stats - if rootblock_tracks_used_blocks(self.root_blk.fstype): - self.root_blk.blocks_used = self.num_used - # always write root (bitmap pointers) - self.root_blk.write() - - def _write_ext_blks(self): - # write ext blocks - for ext_blk in self.ext_blks: - 
ext_blk.write() - - def _write_bitmap_blks(self): - # write bitmap blocks - off = 0 - for blk in self.bitmap_blks: - blk.set_bitmap_data(self.bitmap_data[off:off+self.bitmap_blk_bytes]) - blk.write() - off += self.bitmap_blk_bytes - - def read(self): - self.bitmap_blks = [] - bitmap_data = bytearray() - - # DOS6/7: update num used - if rootblock_tracks_used_blocks(self.root_blk.fstype): - self.num_used = self.root_blk.blocks_used - - # get bitmap blocks from root block - blocks = self.root_blk.bitmap_ptrs - for blk in blocks: - if blk == 0: - break - bm = BitmapBlock(self.blkdev, blk) - bm.read() - if not bm.valid: - raise FSError(INVALID_BITMAP_BLOCK, block=bm) - self.bitmap_blks.append(bm) - bitmap_data += bm.get_bitmap_data() - - # now check extended bitmap blocks - ext_blk = self.root_blk.bitmap_ext_blk - while ext_blk != 0: - bm_ext = BitmapExtBlock(self.blkdev, ext_blk) - bm_ext.read() - self.ext_blks.append(bm_ext) - blocks = bm_ext.bitmap_ptrs - for blk in blocks: - if blk == 0: - break - bm = BitmapBlock(self.blkdev, blk) - bm.read() - if not bm.valid: - raise FSError(INVALID_BITMAP_BLOCK, block=bm) - bitmap_data += bm.get_bitmap_data() - self.bitmap_blks.append(bm) - ext_blk = bm_ext.bitmap_ext_blk - - # check bitmap data - num_bm_blks = len(self.bitmap_blks) - num_bytes = self.bitmap_blk_bytes * num_bm_blks - if num_bytes != len(bitmap_data): - raise FSError(BITMAP_SIZE_MISMATCH, node=self, extra="got=%d want=%d" % (len(bitmap_data), num_bytes)) - if num_bm_blks != self.bitmap_num_blks: - raise FSError(BITMAP_BLOCK_COUNT_MISMATCH, node=self, extra="got=%d want=%d" % (self.bitmap_num_blks, num_bm_blks)) - - # create a modyfiable bitmap - self.bitmap_data = ctypes.create_string_buffer(len(bitmap_data)) - self.bitmap_data[:] = bitmap_data - self.valid = True - - def find_free(self, start=None): - # give start of search - if start == None: - pos = self.find_start - else: - pos = start - # at most scan all bits - num = self.bitmap_bits - while num > 0: - # a 
free bit? - found = self.get_bit(pos) - old_pos = pos - pos += 1 - if pos == self.bitmap_bits + self.blkdev.reserved: - pos = self.blkdev.reserved - if found: - # start a next position - self.find_start = pos - return old_pos - num -= 1 - return None - - def find_n_free(self, num, start=None): - first_blk = self.find_free(start) - if first_blk == None: - return None - if num == 1: - return [first_blk] - result = [first_blk] - for i in range(num-1): - blk_num = self.find_free() - if blk_num == None: - return None - if blk_num in result: + def __init__(self, root_blk): + self.root_blk = root_blk + self.blkdev = self.root_blk.blkdev + # state + self.ext_blks = [] + self.bitmap_blks = [] + self.bitmap_data = None + self.valid = False + # bitmap block entries + self.bitmap_blk_bytes = root_blk.blkdev.block_bytes - 4 + self.bitmap_blk_longs = root_blk.blkdev.block_longs - 1 + # calc size of bitmap + self.bitmap_bits = self.blkdev.num_blocks - self.blkdev.reserved + self.bitmap_longs = (self.bitmap_bits + 31) // 32 + self.bitmap_bytes = (self.bitmap_bits + 7) // 8 + # number of blocks required for bitmap (and bytes consumed there) + self.bitmap_num_blks = ( + self.bitmap_longs + self.bitmap_blk_longs - 1 + ) // self.bitmap_blk_longs + self.bitmap_all_blk_bytes = self.bitmap_num_blks * self.bitmap_blk_bytes + # blocks stored in root and in every ext block + self.num_blks_in_root = len(self.root_blk.bitmap_ptrs) + self.num_blks_in_ext = self.blkdev.block_longs - 1 + # number of ext blocks required + self.num_ext = ( + self.bitmap_num_blks - self.num_blks_in_root + self.num_blks_in_ext - 1 + ) // (self.num_blks_in_ext) + # start a root block + self.find_start = root_blk.blk_num + # was bitmap modified? 
+ self.dirty = False + # for DOS6/7 track used blocks + self.num_used = 0 + + def create(self): + # clear local count + self.num_used = 0 + + # create data and preset with 0xff + self.bitmap_data = ctypes.create_string_buffer(self.bitmap_all_blk_bytes) + for i in range(self.bitmap_all_blk_bytes): + self.bitmap_data[i] = 0xFF + + # clear bit for root block + blk_pos = self.root_blk.blk_num + self.clr_bit(blk_pos) + blk_pos += 1 + + # create ext blocks + for i in range(self.num_ext): + bm_ext = BitmapExtBlock(self.blkdev, blk_pos) + bm_ext.create() + self.clr_bit(blk_pos) + blk_pos += 1 + self.ext_blks.append(bm_ext) + + # create bitmap blocks + for i in range(self.bitmap_num_blks): + bm = BitmapBlock(self.blkdev, blk_pos) + bm.create() + self.clr_bit(blk_pos) + blk_pos += 1 + self.bitmap_blks.append(bm) + + # set pointers to ext blocks + if self.num_ext > 0: + self.root_blk.bitmap_ext_blk = self.ext_blks[0].blk_num + for i in range(self.num_ext - 1): + bm_ext = self.ext_blks[i] + bm_ext_next = self.ext_blks[i + 1] + bm_ext.bitmap_ext_blk = bm_ext_next.blk_num + + # set pointers to bitmap blocks + cur_ext_index = 0 + cur_ext_pos = 0 + for i in range(self.bitmap_num_blks): + blk_num = self.bitmap_blks[i].blk_num + if i < self.num_blks_in_root: + # pointers in root block + self.root_blk.bitmap_ptrs[i] = blk_num + else: + # pointers in ext block + self.ext_blks[cur_ext_index].bitmap_ptrs[cur_ext_pos] = blk_num + cur_ext_pos += 1 + if cur_ext_pos == self.num_blks_in_ext: + cur_ext_pos = 0 + cur_ext_index += 1 + self.valid = True + self.dirty = True + + def write(self): + if self.dirty: + self.dirty = False + # update bitmap + self._write_ext_blks() + self._write_bitmap_blks() + # in DOS6/DOS7 update root block stats + if rootblock_tracks_used_blocks(self.root_blk.fstype): + self.root_blk.blocks_used = self.num_used + # always write root (bitmap pointers) + self.root_blk.write() + + def _write_ext_blks(self): + # write ext blocks + for ext_blk in self.ext_blks: + 
ext_blk.write() + + def _write_bitmap_blks(self): + # write bitmap blocks + off = 0 + for blk in self.bitmap_blks: + blk.set_bitmap_data(self.bitmap_data[off : off + self.bitmap_blk_bytes]) + blk.write() + off += self.bitmap_blk_bytes + + def read(self): + self.bitmap_blks = [] + bitmap_data = bytearray() + + # DOS6/7: update num used + if rootblock_tracks_used_blocks(self.root_blk.fstype): + self.num_used = self.root_blk.blocks_used + + # get bitmap blocks from root block + blocks = self.root_blk.bitmap_ptrs + for blk in blocks: + if blk == 0: + break + bm = BitmapBlock(self.blkdev, blk) + bm.read() + if not bm.valid: + raise FSError(INVALID_BITMAP_BLOCK, block=bm) + self.bitmap_blks.append(bm) + bitmap_data += bm.get_bitmap_data() + + # now check extended bitmap blocks + ext_blk = self.root_blk.bitmap_ext_blk + while ext_blk != 0: + bm_ext = BitmapExtBlock(self.blkdev, ext_blk) + bm_ext.read() + self.ext_blks.append(bm_ext) + blocks = bm_ext.bitmap_ptrs + for blk in blocks: + if blk == 0: + break + bm = BitmapBlock(self.blkdev, blk) + bm.read() + if not bm.valid: + raise FSError(INVALID_BITMAP_BLOCK, block=bm) + bitmap_data += bm.get_bitmap_data() + self.bitmap_blks.append(bm) + ext_blk = bm_ext.bitmap_ext_blk + + # check bitmap data + num_bm_blks = len(self.bitmap_blks) + num_bytes = self.bitmap_blk_bytes * num_bm_blks + if num_bytes != len(bitmap_data): + raise FSError( + BITMAP_SIZE_MISMATCH, + node=self, + extra="got=%d want=%d" % (len(bitmap_data), num_bytes), + ) + if num_bm_blks != self.bitmap_num_blks: + raise FSError( + BITMAP_BLOCK_COUNT_MISMATCH, + node=self, + extra="got=%d want=%d" % (self.bitmap_num_blks, num_bm_blks), + ) + + # create a modyfiable bitmap + self.bitmap_data = ctypes.create_string_buffer(len(bitmap_data)) + self.bitmap_data[:] = bitmap_data + self.valid = True + + def find_free(self, start=None): + # give start of search + if start == None: + pos = self.find_start + else: + pos = start + # at most scan all bits + num = 
self.bitmap_bits + while num > 0: + # a free bit? + found = self.get_bit(pos) + old_pos = pos + pos += 1 + if pos == self.bitmap_bits + self.blkdev.reserved: + pos = self.blkdev.reserved + if found: + # start a next position + self.find_start = pos + return old_pos + num -= 1 return None - result.append(blk_num) - return result - - def get_num_free(self): - num = 0 - res = self.blkdev.reserved - for i in range(self.bitmap_bits): - if self.get_bit(i + res): - num+=1 - return num - - def get_num_used(self): - num = 0 - res = self.blkdev.reserved - for i in range(self.bitmap_bits): - if not self.get_bit(i + res): - num+=1 - return num - - def alloc_n(self, num, start=None): - free_blks = self.find_n_free(num, start) - if free_blks == None: - return None - for b in free_blks: - self.clr_bit(b) - return free_blks - - def dealloc_n(self, blks): - for b in blks: - self.set_bit(b) - - def get_bit(self, off): - if off < self.blkdev.reserved or off >= self.blkdev.num_blocks: - return None - off = (off - self.blkdev.reserved) - long_off = off // 32 - bit_off = off % 32 - val = struct.unpack_from(">I", self.bitmap_data, long_off * 4)[0] - mask = 1 << bit_off - return (val & mask) == mask - - # mark as free - def set_bit(self, off): - if off < self.blkdev.reserved or off >= self.blkdev.num_blocks: - return False - off = (off - self.blkdev.reserved) - long_off = off // 32 - bit_off = off % 32 - val = struct.unpack_from(">I", self.bitmap_data, long_off * 4)[0] - mask = 1 << bit_off - if val & mask == 0: - val = val | mask - struct.pack_into(">I", self.bitmap_data, long_off * 4, val) - self.dirty = True - self.num_used -= 1 - - # mark as used - def clr_bit(self, off): - if off < self.blkdev.reserved or off >= self.blkdev.num_blocks: - return False - off = (off - self.blkdev.reserved) - long_off = off // 32 - bit_off = off % 32 - val = struct.unpack_from(">I", self.bitmap_data, long_off * 4)[0] - mask = 1 << bit_off - if val & mask == mask: - val = val & ~mask - 
struct.pack_into(">I", self.bitmap_data, long_off * 4, val) - self.dirty = True - self.num_used += 1 - - def dump(self): - print("Bitmap:") - print(" ext: ",self.ext_blks) - print(" blks:",len(self.bitmap_blks)) - print(" bits:",len(self.bitmap_data) * 8,self.blkdev.num_blocks) - - def print_info(self): - num_free = self.get_num_free() - num_used = self.get_num_used() - print("num free:", num_free) - print("num used:", num_used) - if rootblock_tracks_used_blocks(self.root_blk.fstype): - print("num used:", self.num_used, "(cached in root)") - print("sum: ", num_free + num_used) - print("total: ", self.bitmap_bits) - - def create_draw_bitmap(self): - bm = ctypes.create_string_buffer(self.blkdev.num_blocks) - for i in range(self.blkdev.num_blocks): - bm[i] = chr(0) - return bm - - def print_free(self, brief=False): - bm = self.create_draw_bitmap() - res = self.blkdev.reserved - for i in range(self.blkdev.num_blocks): - if i >= res and self.get_bit(i): - bm[i] = 'F' - self.print_draw_bitmap(bm, brief) - - def print_used(self, brief=False): - bm = self.create_draw_bitmap() - res = self.blkdev.reserved - for i in range(self.blkdev.num_blocks): - if i >= res and not self.get_bit(i): - bm[i] = '#' - self.print_draw_bitmap(bm, brief) - - def draw_on_bitmap(self, bm): - # show reserved blocks - res = self.blkdev.reserved - bm[0:res] = "x" * res - # root block - bm[self.root_blk.blk_num] = 'R' - # bitmap blocks - for bm_blk in self.bitmap_blks: - bm[bm_blk.blk_num] = 'b' - # bitmap ext blocks - for ext_blk in self.ext_blks: - bm[ext_blk.blk_num] = 'B' - - def print_draw_bitmap(self, bm, brief=False): - line = "" - blk = 0 - blk_cyl = self.blkdev.sectors * self.blkdev.heads - found = False - for i in range(self.blkdev.num_blocks): - c = bm[i] - if ord(c) == 0: - c = '.' 
- else: - found = True - line += c - if i % self.blkdev.sectors == self.blkdev.sectors - 1: - line += " " - if i % blk_cyl == blk_cyl - 1: - if not brief or found: - print("%8d: %s" % (blk,line)) - blk += blk_cyl + + def find_n_free(self, num, start=None): + first_blk = self.find_free(start) + if first_blk == None: + return None + if num == 1: + return [first_blk] + result = [first_blk] + for i in range(num - 1): + blk_num = self.find_free() + if blk_num == None: + return None + if blk_num in result: + return None + result.append(blk_num) + return result + + def get_num_free(self): + num = 0 + res = self.blkdev.reserved + for i in range(self.bitmap_bits): + if self.get_bit(i + res): + num += 1 + return num + + def get_num_used(self): + num = 0 + res = self.blkdev.reserved + for i in range(self.bitmap_bits): + if not self.get_bit(i + res): + num += 1 + return num + + def alloc_n(self, num, start=None): + free_blks = self.find_n_free(num, start) + if free_blks == None: + return None + for b in free_blks: + self.clr_bit(b) + return free_blks + + def dealloc_n(self, blks): + for b in blks: + self.set_bit(b) + + def get_bit(self, off): + if off < self.blkdev.reserved or off >= self.blkdev.num_blocks: + return None + off = off - self.blkdev.reserved + long_off = off // 32 + bit_off = off % 32 + val = struct.unpack_from(">I", self.bitmap_data, long_off * 4)[0] + mask = 1 << bit_off + return (val & mask) == mask + + # mark as free + def set_bit(self, off): + if off < self.blkdev.reserved or off >= self.blkdev.num_blocks: + return False + off = off - self.blkdev.reserved + long_off = off // 32 + bit_off = off % 32 + val = struct.unpack_from(">I", self.bitmap_data, long_off * 4)[0] + mask = 1 << bit_off + if val & mask == 0: + val = val | mask + struct.pack_into(">I", self.bitmap_data, long_off * 4, val) + self.dirty = True + self.num_used -= 1 + + # mark as used + def clr_bit(self, off): + if off < self.blkdev.reserved or off >= self.blkdev.num_blocks: + return False + off 
= off - self.blkdev.reserved + long_off = off // 32 + bit_off = off % 32 + val = struct.unpack_from(">I", self.bitmap_data, long_off * 4)[0] + mask = 1 << bit_off + if val & mask == mask: + val = val & ~mask + struct.pack_into(">I", self.bitmap_data, long_off * 4, val) + self.dirty = True + self.num_used += 1 + + def dump(self): + print("Bitmap:") + print(" ext: ", self.ext_blks) + print(" blks:", len(self.bitmap_blks)) + print(" bits:", len(self.bitmap_data) * 8, self.blkdev.num_blocks) + + def print_info(self): + num_free = self.get_num_free() + num_used = self.get_num_used() + print("num free:", num_free) + print("num used:", num_used) + if rootblock_tracks_used_blocks(self.root_blk.fstype): + print("num used:", self.num_used, "(cached in root)") + print("sum: ", num_free + num_used) + print("total: ", self.bitmap_bits) + + def create_draw_bitmap(self): + bm = ctypes.create_string_buffer(self.blkdev.num_blocks) + for i in range(self.blkdev.num_blocks): + bm[i] = chr(0) + return bm + + def print_free(self, brief=False): + bm = self.create_draw_bitmap() + res = self.blkdev.reserved + for i in range(self.blkdev.num_blocks): + if i >= res and self.get_bit(i): + bm[i] = "F" + self.print_draw_bitmap(bm, brief) + + def print_used(self, brief=False): + bm = self.create_draw_bitmap() + res = self.blkdev.reserved + for i in range(self.blkdev.num_blocks): + if i >= res and not self.get_bit(i): + bm[i] = "#" + self.print_draw_bitmap(bm, brief) + + def draw_on_bitmap(self, bm): + # show reserved blocks + res = self.blkdev.reserved + bm[0:res] = "x" * res + # root block + bm[self.root_blk.blk_num] = "R" + # bitmap blocks + for bm_blk in self.bitmap_blks: + bm[bm_blk.blk_num] = "b" + # bitmap ext blocks + for ext_blk in self.ext_blks: + bm[ext_blk.blk_num] = "B" + + def print_draw_bitmap(self, bm, brief=False): line = "" + blk = 0 + blk_cyl = self.blkdev.sectors * self.blkdev.heads found = False + for i in range(self.blkdev.num_blocks): + c = bm[i] + if ord(c) == 0: + c = "." 
+ else: + found = True + line += c + if i % self.blkdev.sectors == self.blkdev.sectors - 1: + line += " " + if i % blk_cyl == blk_cyl - 1: + if not brief or found: + print("%8d: %s" % (blk, line)) + blk += blk_cyl + line = "" + found = False diff --git a/amitools/fs/ADFSDir.py b/amitools/fs/ADFSDir.py index 8b0e2a87..8a8d2acb 100644 --- a/amitools/fs/ADFSDir.py +++ b/amitools/fs/ADFSDir.py @@ -1,6 +1,3 @@ - - - import struct from .block.Block import Block from .block.UserDirBlock import UserDirBlock @@ -12,507 +9,541 @@ from .FSString import FSString from .MetaInfo import * + class ADFSDir(ADFSNode): - def __init__(self, volume, parent): - ADFSNode.__init__(self, volume, parent) - # state - self.entries = None - self.dcache_blks = None - self.name_hash = None - self.hash_size = 72 - self.valid = False - - def __repr__(self): - if self.block != None: - return "[Dir(%d)'%s':%s]" % (self.block.blk_num, self.block.name, self.entries) - else: - return "[Dir]" - - def blocks_create_old(self, anon_blk): - ud = UserDirBlock(self.blkdev, anon_blk.blk_num, self.volume.is_longname) - ud.set(anon_blk.data) - if not ud.valid: - raise FSError(INVALID_USER_DIR_BLOCK, block=anon_blk) - self.set_block(ud) - return ud - - def _read_add_node(self, blk, recursive): - hash_chain = None - node = None - if blk.valid_chksum and blk.type == Block.T_SHORT: - # its a userdir - if blk.sub_type == Block.ST_USERDIR: + def __init__(self, volume, parent): + ADFSNode.__init__(self, volume, parent) + # state + self.entries = None + self.dcache_blks = None + self.name_hash = None + self.hash_size = 72 + self.valid = False + + def __repr__(self): + if self.block != None: + return "[Dir(%d)'%s':%s]" % ( + self.block.blk_num, + self.block.name, + self.entries, + ) + else: + return "[Dir]" + + def blocks_create_old(self, anon_blk): + ud = UserDirBlock(self.blkdev, anon_blk.blk_num, self.volume.is_longname) + ud.set(anon_blk.data) + if not ud.valid: + raise FSError(INVALID_USER_DIR_BLOCK, block=anon_blk) 
+ self.set_block(ud) + return ud + + def _read_add_node(self, blk, recursive): + hash_chain = None + node = None + if blk.valid_chksum and blk.type == Block.T_SHORT: + # its a userdir + if blk.sub_type == Block.ST_USERDIR: + node = ADFSDir(self.volume, self) + blk = node.blocks_create_old(blk) + if recursive: + node.read() + # its a file + elif blk.sub_type == Block.ST_FILE: + node = ADFSFile(self.volume, self) + blk = node.blocks_create_old(blk) + # unsupported + else: + raise FSError( + UNSUPPORTED_DIR_BLOCK, + block=blk, + extra="Sub_Type: %08x" % blk.sub_type, + ) + hash_chain = blk.hash_chain + return hash_chain, node + + def _init_name_hash(self): + self.name_hash = [] + self.hash_size = self.block.hash_size + for i in range(self.hash_size): + self.name_hash.append([]) + + def read(self, recursive=False): + self._init_name_hash() + self.entries = [] + + # create initial list with blk_num/hash_index for dir scan + blocks = [] + for i in range(self.block.hash_size): + blk_num = self.block.hash_table[i] + if blk_num != 0: + blocks.append((blk_num, i)) + + for blk_num, hash_idx in blocks: + # read anonymous block + blk = Block(self.blkdev, blk_num) + blk.read() + if not blk.valid: + self.valid = False + return + # create file/dir node + hash_chain, node = self._read_add_node(blk, recursive) + # store node in entries + self.entries.append(node) + # store node in name_hash + self.name_hash[hash_idx].append(node) + # follow hash chain + if hash_chain != 0: + blocks.append((hash_chain, hash_idx)) + + # dircaches available? 
+ if self.volume.is_dircache: + self.dcache_blks = [] + dcb_num = self.block.extension + while dcb_num != 0: + dcb = DirCacheBlock(self.blkdev, dcb_num) + dcb.read() + if not dcb.valid: + self.valid = False + return + self.dcache_blks.append(dcb) + dcb_num = dcb.next_cache + + def flush(self): + if self.entries: + for e in self.entries: + e.flush() + self.entries = None + self.name_hash = None + + def ensure_entries(self): + if not self.entries: + self.read() + + def get_entries(self): + self.ensure_entries() + return self.entries + + def has_name(self, fn): + fn_hash = fn.hash(hash_size=self.hash_size) + fn_up = fn.get_upper_ami_str() + node_list = self.name_hash[fn_hash] + for node in node_list: + if node.name.get_upper_ami_str() == fn_up: + return True + return False + + def blocks_create_new(self, free_blks, name, hash_chain_blk, parent_blk, meta_info): + blk_num = free_blks[0] + blkdev = self.blkdev + # create a UserDirBlock + ud = UserDirBlock(blkdev, blk_num, self.volume.is_longname) + ud.create( + parent_blk, + name, + meta_info.get_protect(), + meta_info.get_comment(), + meta_info.get_mod_ts(), + hash_chain_blk, + ) + ud.write() + self.set_block(ud) + self._init_name_hash() + return blk_num + + def blocks_get_create_num(self): + # the number of blocks needed for a new (empty) directory + # -> only one UserDirBlock + return 1 + + def _create_node(self, node, name, meta_info, update_ts=True): + self.ensure_entries() + + # make sure a default meta_info is available + if meta_info == None: + meta_info = MetaInfo() + meta_info.set_current_as_mod_time() + meta_info.set_default_protect() + # check file name + fn = FileName( + name, is_intl=self.volume.is_intl, is_longname=self.volume.is_longname + ) + if not fn.is_valid(): + raise FSError(INVALID_FILE_NAME, file_name=name, node=self) + # does already exist an entry in this dir with this name? 
+ if self.has_name(fn): + raise FSError(NAME_ALREADY_EXISTS, file_name=name, node=self) + # calc hash index of name + fn_hash = fn.hash(hash_size=self.hash_size) + hash_chain = self.name_hash[fn_hash] + if len(hash_chain) == 0: + hash_chain_blk = 0 + else: + hash_chain_blk = hash_chain[0].block.blk_num + + # return the number of blocks required to create this node + num_blks = node.blocks_get_create_num() + + # try to find free blocks + free_blks = self.volume.bitmap.alloc_n(num_blks) + if free_blks == None: + raise FSError( + NO_FREE_BLOCKS, node=self, file_name=name, extra="want %d" % num_blks + ) + + # now create the blocks for this node + new_blk = node.blocks_create_new( + free_blks, name, hash_chain_blk, self.block.blk_num, meta_info + ) + + # dircache: create record for this node + if self.volume.is_dircache: + ok = self._dircache_add_entry( + name, meta_info, new_blk, node.get_size(), update_myself=False + ) + if not ok: + self.delete() + raise FSError( + NO_FREE_BLOCKS, node=self, file_name=name, extra="want dcache" + ) + + # update my dir + self.block.hash_table[fn_hash] = new_blk + self.block.write() + + # add node + self.name_hash[fn_hash].insert(0, node) + self.entries.append(node) + + # update time stamps + if update_ts: + self.update_dir_mod_time() + self.volume.update_disk_time() + + def update_dir_mod_time(self): + mi = MetaInfo() + mi.set_current_as_mod_time() + self.change_meta_info(mi) + + def create_dir(self, name, meta_info=None, update_ts=True): + if not isinstance(name, FSString): + raise ValueError("create_dir's name must be a FSString") node = ADFSDir(self.volume, self) - blk = node.blocks_create_old(blk) - if recursive: - node.read() - # its a file - elif blk.sub_type == Block.ST_FILE: + self._create_node(node, name, meta_info, update_ts) + return node + + def create_file(self, name, data, meta_info=None, update_ts=True): + if not isinstance(name, FSString): + raise ValueError("create_file's name must be a FSString") node = 
ADFSFile(self.volume, self) - blk = node.blocks_create_old(blk) - # unsupported - else: - raise FSError(UNSUPPORTED_DIR_BLOCK, block=blk, extra="Sub_Type: %08x" % blk.sub_type) - hash_chain = blk.hash_chain - return hash_chain,node - - def _init_name_hash(self): - self.name_hash = [] - self.hash_size = self.block.hash_size - for i in range(self.hash_size): - self.name_hash.append([]) - - def read(self, recursive=False): - self._init_name_hash() - self.entries = [] - - # create initial list with blk_num/hash_index for dir scan - blocks = [] - for i in range(self.block.hash_size): - blk_num = self.block.hash_table[i] - if blk_num != 0: - blocks.append((blk_num,i)) - - for blk_num,hash_idx in blocks: - # read anonymous block - blk = Block(self.blkdev, blk_num) - blk.read() - if not blk.valid: - self.valid = False - return - # create file/dir node - hash_chain,node = self._read_add_node(blk, recursive) - # store node in entries - self.entries.append(node) - # store node in name_hash - self.name_hash[hash_idx].append(node) - # follow hash chain - if hash_chain != 0: - blocks.append((hash_chain,hash_idx)) - - # dircaches available? 
- if self.volume.is_dircache: - self.dcache_blks = [] - dcb_num = self.block.extension - while dcb_num != 0: - dcb = DirCacheBlock(self.blkdev, dcb_num) - dcb.read() - if not dcb.valid: - self.valid = False - return - self.dcache_blks.append(dcb) - dcb_num = dcb.next_cache - - def flush(self): - if self.entries: - for e in self.entries: - e.flush() - self.entries = None - self.name_hash = None - - def ensure_entries(self): - if not self.entries: - self.read() - - def get_entries(self): - self.ensure_entries() - return self.entries - - def has_name(self, fn): - fn_hash = fn.hash(hash_size=self.hash_size) - fn_up = fn.get_upper_ami_str() - node_list = self.name_hash[fn_hash] - for node in node_list: - if node.name.get_upper_ami_str() == fn_up: + node.set_file_data(data) + self._create_node(node, name, meta_info, update_ts) + return node + + def _delete(self, node, wipe, update_ts): + self.ensure_entries() + + # can we delete? + if not node.can_delete(): + raise FSError(DELETE_NOT_ALLOWED, node=node) + # make sure its a node of mine + if node.parent != self: + raise FSError(INTERNAL_ERROR, node=node, extra="node parent is not me") + if node not in self.entries: + raise FSError(INTERNAL_ERROR, node=node, extra="node not in entries") + # get hash key + hash_key = node.name.hash(hash_size=self.hash_size) + names = self.name_hash[hash_key] + # find my node + pos = None + for i in range(len(names)): + if names[i] == node: + pos = i + break + # hmm not found?! 
+ if pos == None: + raise FSError( + INTERNAL_ERROR, node=node, extra="node not found in hash chain" + ) + # find prev and next in hash list + if pos > 0: + prev = names[pos - 1] + else: + prev = None + if pos == len(names) - 1: + next_blk = 0 + else: + next_blk = names[pos + 1].block.blk_num + + # remove node from the hash chain + if prev == None: + self.block.hash_table[hash_key] = next_blk + self.block.write() + else: + prev.block.hash_chain = next_blk + prev.block.write() + + # remove from my lists + self.entries.remove(node) + names.remove(node) + + # remove blocks of node in bitmap + blk_nums = node.get_block_nums() + self.volume.bitmap.dealloc_n(blk_nums) + + # dircache? + if self.volume.is_dircache: + free_blk_num = self._dircache_remove_entry(node.name.name) + else: + free_blk_num = None + + # (optional) wipe blocks + if wipe: + clr_blk = "\0" * self.blkdev.block_bytes + for blk_num in blk_nums: + self.blkdev.write_block(blk_num, clr_blk) + # wipe a potentially free'ed dircache block, too + if free_blk_num != None: + self.blkdev.write_block(free_blk_num, clr_blk) + + # update time stamps + if update_ts: + self.update_dir_mod_time() + self.volume.update_disk_time() + + def can_delete(self): + self.ensure_entries() + return len(self.entries) == 0 + + def delete_children(self, wipe, all, update_ts): + self.ensure_entries() + entries = self.entries[:] + for e in entries: + e.delete(wipe, all, update_ts) + + def get_entries_sorted_by_name(self): + self.ensure_entries() + return sorted(self.entries, key=lambda x: x.name.get_upper_ami_str()) + + def list(self, indent=0, all=False, detail=False, encoding="UTF-8"): + ADFSNode.list(self, indent, all, detail, encoding) + if not all and indent > 0: + return + self.ensure_entries() + es = self.get_entries_sorted_by_name() + for e in es: + e.list(indent=indent + 1, all=all, detail=detail, encoding=encoding) + + def get_path(self, pc, allow_file=True, allow_dir=True): + if len(pc) == 0: + return self + 
self.ensure_entries() + for e in self.entries: + if not isinstance(pc[0], FileName): + raise ValueError("get_path's pc must be a FileName array") + if e.name.get_upper_ami_str() == pc[0].get_upper_ami_str(): + if len(pc) > 1: + if isinstance(e, ADFSDir): + return e.get_path(pc[1:], allow_file, allow_dir) + else: + return None + else: + if isinstance(e, ADFSDir): + if allow_dir: + return e + else: + return None + elif isinstance(e, ADFSFile): + if allow_file: + return e + else: + return None + else: + return None + return None + + def draw_on_bitmap(self, bm, show_all=False, first=True): + blk_num = self.block.blk_num + bm[blk_num] = "D" + if show_all or first: + self.ensure_entries() + for e in self.entries: + e.draw_on_bitmap(bm, show_all, False) + if self.dcache_blks != None: + for dcb in self.dcache_blks: + bm[dcb.blk_num] = "C" + + def get_block_nums(self): + self.ensure_entries() + result = [self.block.blk_num] + if self.volume.is_dircache: + for dcb in self.dcache_blks: + result.append(dcb.blk_num) + return result + + def get_blocks(self, with_data=False): + self.ensure_entries() + result = [self.block] + if self.volume.is_dircache: + result += self.dcache_blks + return result + + def get_size(self): + return 0 + + def get_size_str(self): + return "DIR" + + def get_detail_str(self): + self.ensure_entries() + if self.entries != None: + s = "entries=%d" % len(self.entries) + else: + s = "" + if self.dcache_blks != None: + s += " dcache=%d" % len(self.dcache_blks) + return s + + # ----- dir cache ----- + + def _dircache_add_entry(self, name, meta_info, entry_blk, size, update_myself=True): + # create a new dircache record + r = DirCacheRecord( + entry=entry_blk, + size=size, + protect=meta_info.get_protect(), + mod_ts=meta_info.get_mod_ts(), + sub_type=0, + name=name, + comment=meta_info.get_comment(), + ) + return self._dircache_add_entry_int(r, update_myself) + + def _dircache_add_entry_int(self, r, update_myself=True): + r_bytes = r.get_size() + # find a 
dircache block with enough space + found_blk = None + for dcb in self.dcache_blks: + free_bytes = dcb.get_free_record_size() + if r_bytes < free_bytes: + found_blk = dcb + break + # need to create a new one? + if found_blk == None: + found_blk = self._dircache_add_block(update_myself) + if found_blk == None: + return False + # add record to block and update it + found_blk.add_record(r) + found_blk.write() return True - return False - - def blocks_create_new(self, free_blks, name, hash_chain_blk, parent_blk, meta_info): - blk_num = free_blks[0] - blkdev = self.blkdev - # create a UserDirBlock - ud = UserDirBlock(blkdev, blk_num, self.volume.is_longname) - ud.create(parent_blk, name, meta_info.get_protect(), meta_info.get_comment(), meta_info.get_mod_ts(), hash_chain_blk) - ud.write() - self.set_block(ud) - self._init_name_hash() - return blk_num - - def blocks_get_create_num(self): - # the number of blocks needed for a new (empty) directory - # -> only one UserDirBlock - return 1 - - def _create_node(self, node, name, meta_info, update_ts=True): - self.ensure_entries() - - # make sure a default meta_info is available - if meta_info == None: - meta_info = MetaInfo() - meta_info.set_current_as_mod_time() - meta_info.set_default_protect() - # check file name - fn = FileName(name, is_intl=self.volume.is_intl,is_longname=self.volume.is_longname) - if not fn.is_valid(): - raise FSError(INVALID_FILE_NAME, file_name=name, node=self) - # does already exist an entry in this dir with this name? 
- if self.has_name(fn): - raise FSError(NAME_ALREADY_EXISTS, file_name=name, node=self) - # calc hash index of name - fn_hash = fn.hash(hash_size=self.hash_size) - hash_chain = self.name_hash[fn_hash] - if len(hash_chain) == 0: - hash_chain_blk = 0 - else: - hash_chain_blk = hash_chain[0].block.blk_num - - # return the number of blocks required to create this node - num_blks = node.blocks_get_create_num() - - # try to find free blocks - free_blks = self.volume.bitmap.alloc_n(num_blks) - if free_blks == None: - raise FSError(NO_FREE_BLOCKS, node=self, file_name=name, extra="want %d" % num_blks) - - # now create the blocks for this node - new_blk = node.blocks_create_new(free_blks, name, hash_chain_blk, self.block.blk_num, meta_info) - - # dircache: create record for this node - if self.volume.is_dircache: - ok = self._dircache_add_entry(name, meta_info, new_blk, node.get_size(), update_myself=False) - if not ok: - self.delete() - raise FSError(NO_FREE_BLOCKS, node=self, file_name=name, extra="want dcache") - - # update my dir - self.block.hash_table[fn_hash] = new_blk - self.block.write() - - # add node - self.name_hash[fn_hash].insert(0,node) - self.entries.append(node) - - # update time stamps - if update_ts: - self.update_dir_mod_time() - self.volume.update_disk_time() - - def update_dir_mod_time(self): - mi = MetaInfo() - mi.set_current_as_mod_time() - self.change_meta_info(mi) - - def create_dir(self, name, meta_info=None, update_ts=True): - if not isinstance(name, FSString): - raise ValueError("create_dir's name must be a FSString") - node = ADFSDir(self.volume, self) - self._create_node(node, name, meta_info, update_ts) - return node - - def create_file(self, name, data, meta_info=None, update_ts=True): - if not isinstance(name, FSString): - raise ValueError("create_file's name must be a FSString") - node = ADFSFile(self.volume, self) - node.set_file_data(data) - self._create_node(node, name, meta_info, update_ts) - return node - - def _delete(self, node, 
wipe, update_ts): - self.ensure_entries() - - # can we delete? - if not node.can_delete(): - raise FSError(DELETE_NOT_ALLOWED, node=node) - # make sure its a node of mine - if node.parent != self: - raise FSError(INTERNAL_ERROR, node=node, extra="node parent is not me") - if node not in self.entries: - raise FSError(INTERNAL_ERROR, node=node, extra="node not in entries") - # get hash key - hash_key = node.name.hash(hash_size=self.hash_size) - names = self.name_hash[hash_key] - # find my node - pos = None - for i in range(len(names)): - if names[i] == node: - pos = i - break - # hmm not found?! - if pos == None: - raise FSError(INTERNAL_ERROR, node=node, extra="node not found in hash chain") - # find prev and next in hash list - if pos > 0: - prev = names[pos-1] - else: - prev = None - if pos == len(names)-1: - next_blk = 0 - else: - next_blk = names[pos+1].block.blk_num - - # remove node from the hash chain - if prev == None: - self.block.hash_table[hash_key] = next_blk - self.block.write() - else: - prev.block.hash_chain = next_blk - prev.block.write() - - # remove from my lists - self.entries.remove(node) - names.remove(node) - - # remove blocks of node in bitmap - blk_nums = node.get_block_nums() - self.volume.bitmap.dealloc_n(blk_nums) - - # dircache? 
- if self.volume.is_dircache: - free_blk_num = self._dircache_remove_entry(node.name.name) - else: - free_blk_num = None - - # (optional) wipe blocks - if wipe: - clr_blk = '\0' * self.blkdev.block_bytes - for blk_num in blk_nums: - self.blkdev.write_block(blk_num, clr_blk) - # wipe a potentially free'ed dircache block, too - if free_blk_num != None: - self.blkdev.write_block(free_blk_num, clr_blk) - - # update time stamps - if update_ts: - self.update_dir_mod_time() - self.volume.update_disk_time() - - def can_delete(self): - self.ensure_entries() - return len(self.entries) == 0 - - def delete_children(self, wipe, all, update_ts): - self.ensure_entries() - entries = self.entries[:] - for e in entries: - e.delete(wipe, all, update_ts) - - def get_entries_sorted_by_name(self): - self.ensure_entries() - return sorted(self.entries, key=lambda x : x.name.get_upper_ami_str()) - - def list(self, indent=0, all=False, detail=False, encoding="UTF-8"): - ADFSNode.list(self, indent, all, detail, encoding) - if not all and indent > 0: - return - self.ensure_entries() - es = self.get_entries_sorted_by_name() - for e in es: - e.list(indent=indent+1, all=all, detail=detail, encoding=encoding) - - def get_path(self, pc, allow_file=True, allow_dir=True): - if len(pc) == 0: - return self - self.ensure_entries() - for e in self.entries: - if not isinstance(pc[0], FileName): - raise ValueError("get_path's pc must be a FileName array") - if e.name.get_upper_ami_str() == pc[0].get_upper_ami_str(): - if len(pc) > 1: - if isinstance(e, ADFSDir): - return e.get_path(pc[1:], allow_file, allow_dir) - else: + + def _dircache_add_block(self, update_myself): + # allocate block + blk_nums = self.volume.bitmap.alloc_n(1) + if blk_nums == None: return None + # setup dir cache block + dcb_num = blk_nums[0] + dcb = DirCacheBlock(self.blkdev, dcb_num) + dcb.create(parent=self.block.blk_num) + # link new cache block + if len(self.dcache_blks) == 0: + self.block.extension = dcb_num + if update_myself: 
+ self.block.write() else: - if isinstance(e, ADFSDir): - if allow_dir: - return e + last_dcb = self.dcache_blks[-1] + last_dcb.next_cache = dcb_num + last_dcb.write() + self.dcache_blks.append(dcb) + return dcb + + def _dircache_remove_entry(self, name, update_myself=True): + # first find entry + pos = None + dcb = None + record = None + n = len(self.dcache_blks) + for i in range(n): + dcb = self.dcache_blks[i] + record = dcb.get_record_by_name(name) + if record != None: + pos = i + break + if record == None: + raise FSError(INTERNAL_ERROR, node=self, extra="no dc record!") + # remove entry from this block + dcb.remove_record(record) + # remove whole block? + if dcb.is_empty(): + # next block following me + if pos == n - 1: + next = 0 else: - return None - elif isinstance(e, ADFSFile): - if allow_file: - return e + next = self.dcache_blks[pos + 1].blk_num + # update block links + if pos == 0: + # adjust extension link in this dir node + self.block.extension = next + if update_myself: + self.block.write() else: - return None - else: + # adjust dircache block in front of me + prev_blk = self.dcache_blks[pos - 1] + prev_blk.next_cache = next + prev_blk.write() + # free cache block in bitmap + blk_num = dcb.blk_num + self.volume.bitmap.dealloc_n([blk_num]) + return blk_num # return number of just deleted block + else: + # update cache block with reduced set of records + dcb.write() return None - return None - - def draw_on_bitmap(self, bm, show_all=False, first=True): - blk_num = self.block.blk_num - bm[blk_num] = 'D' - if show_all or first: - self.ensure_entries() - for e in self.entries: - e.draw_on_bitmap(bm, show_all, False) - if self.dcache_blks != None: - for dcb in self.dcache_blks: - bm[dcb.blk_num] = 'C' - - def get_block_nums(self): - self.ensure_entries() - result = [self.block.blk_num] - if self.volume.is_dircache: - for dcb in self.dcache_blks: - result.append(dcb.blk_num) - return result - - def get_blocks(self, with_data=False): - self.ensure_entries() 
- result = [self.block] - if self.volume.is_dircache: - result += self.dcache_blks - return result - - def get_size(self): - return 0 - - def get_size_str(self): - return "DIR" - - def get_detail_str(self): - self.ensure_entries() - if self.entries != None: - s = "entries=%d" % len(self.entries) - else: - s = "" - if self.dcache_blks != None: - s += " dcache=%d" % len(self.dcache_blks) - return s - - # ----- dir cache ----- - - def _dircache_add_entry(self, name, meta_info, entry_blk, size, update_myself=True): - # create a new dircache record - r = DirCacheRecord(entry=entry_blk, size=size, protect=meta_info.get_protect(), \ - mod_ts=meta_info.get_mod_ts(), sub_type=0, name=name, - comment=meta_info.get_comment()) - return self._dircache_add_entry_int(r, update_myself) - - def _dircache_add_entry_int(self, r, update_myself=True): - r_bytes = r.get_size() - # find a dircache block with enough space - found_blk = None - for dcb in self.dcache_blks: - free_bytes = dcb.get_free_record_size() - if r_bytes < free_bytes: - found_blk = dcb - break - # need to create a new one? 
- if found_blk == None: - found_blk = self._dircache_add_block(update_myself) - if found_blk == None: - return False - # add record to block and update it - found_blk.add_record(r) - found_blk.write() - return True - - def _dircache_add_block(self, update_myself): - # allocate block - blk_nums = self.volume.bitmap.alloc_n(1) - if blk_nums == None: - return None - # setup dir cache block - dcb_num = blk_nums[0] - dcb = DirCacheBlock(self.blkdev, dcb_num) - dcb.create(parent=self.block.blk_num) - # link new cache block - if len(self.dcache_blks) == 0: - self.block.extension = dcb_num - if update_myself: - self.block.write() - else: - last_dcb = self.dcache_blks[-1] - last_dcb.next_cache = dcb_num - last_dcb.write() - self.dcache_blks.append(dcb) - return dcb - - def _dircache_remove_entry(self, name, update_myself=True): - # first find entry - pos = None - dcb = None - record = None - n = len(self.dcache_blks) - for i in range(n): - dcb = self.dcache_blks[i] - record = dcb.get_record_by_name(name) - if record != None: - pos = i - break - if record == None: - raise FSError(INTERNAL_ERROR, node=self, extra="no dc record!") - # remove entry from this block - dcb.remove_record(record) - # remove whole block? 
- if dcb.is_empty(): - # next block following me - if pos == n-1: - next = 0 - else: - next = self.dcache_blks[pos+1].blk_num - # update block links - if pos == 0: - # adjust extension link in this dir node - self.block.extension = next - if update_myself: - self.block.write() - else: - # adjust dircache block in front of me - prev_blk = self.dcache_blks[pos-1] - prev_blk.next_cache = next - prev_blk.write() - # free cache block in bitmap - blk_num = dcb.blk_num - self.volume.bitmap.dealloc_n([blk_num]) - return blk_num # return number of just deleted block - else: - # update cache block with reduced set of records - dcb.write() - return None - - def get_dircache_record(self, name): - if self.dcache_blks: - for dcb in self.dcache_blks: - record = dcb.get_record_by_name(name) - if record: - return record - return None - - def update_dircache_record(self, record, rebuild): - if self.dcache_blks == None: - return - # update record - if rebuild: - self._dircache_remove_entry(record.name, update_myself=False) - self._dircache_add_entry_int(record, update_myself=True) - else: - # simply re-write the dircache block - for dcb in self.dcache_blks: - if dcb.has_record(record): - dcb.write() - break - - def get_block_usage(self, all=False, first=True): - num_non_data = 1 - num_data = 0 - if self.dcache_blks != None: - num_non_data += len(self.dcache_blks) - if all or first: - self.ensure_entries() - for e in self.entries: - bu = e.get_block_usage(all=all, first=False) - num_data += bu[0] - num_non_data += bu[1] - return (num_data, num_non_data) - - def get_file_bytes(self, all=False, first=True): - size = 0 - if all or first: - self.ensure_entries() - for e in self.entries: - size += e.get_file_bytes(all=all, first=False) - return size - - def is_dir(self): - return True + + def get_dircache_record(self, name): + if self.dcache_blks: + for dcb in self.dcache_blks: + record = dcb.get_record_by_name(name) + if record: + return record + return None + + def 
update_dircache_record(self, record, rebuild): + if self.dcache_blks == None: + return + # update record + if rebuild: + self._dircache_remove_entry(record.name, update_myself=False) + self._dircache_add_entry_int(record, update_myself=True) + else: + # simply re-write the dircache block + for dcb in self.dcache_blks: + if dcb.has_record(record): + dcb.write() + break + + def get_block_usage(self, all=False, first=True): + num_non_data = 1 + num_data = 0 + if self.dcache_blks != None: + num_non_data += len(self.dcache_blks) + if all or first: + self.ensure_entries() + for e in self.entries: + bu = e.get_block_usage(all=all, first=False) + num_data += bu[0] + num_non_data += bu[1] + return (num_data, num_non_data) + + def get_file_bytes(self, all=False, first=True): + size = 0 + if all or first: + self.ensure_entries() + for e in self.entries: + size += e.get_file_bytes(all=all, first=False) + return size + + def is_dir(self): + return True diff --git a/amitools/fs/ADFSFile.py b/amitools/fs/ADFSFile.py index dd1a2909..803236db 100644 --- a/amitools/fs/ADFSFile.py +++ b/amitools/fs/ADFSFile.py @@ -1,6 +1,3 @@ - - - from .block.EntryBlock import EntryBlock from .block.FileHeaderBlock import FileHeaderBlock from .block.FileListBlock import FileListBlock @@ -8,282 +5,313 @@ from .ADFSNode import ADFSNode from .FSError import * + class ADFSFile(ADFSNode): - def __init__(self, volume, parent): - ADFSNode.__init__(self, volume, parent) - # state - self.ext_blk_nums = [] - self.ext_blks = [] - self.data_blk_nums = [] - self.data_blks = [] - self.valid = False - self.data = None - self.data_size = 0 - self.total_blks = 0 - - def __repr__(self): - return "[File(%d)'%s':%d]" % (self.block.blk_num, self.block.name, self.block.byte_size) - - def blocks_create_old(self, anon_blk): - # create file header block - fhb = FileHeaderBlock(self.blkdev, anon_blk.blk_num, self.volume.is_longname) - fhb.set(anon_blk.data) - if not fhb.valid: - raise FSError(INVALID_FILE_HEADER_BLOCK, 
block=anon_blk) - self.set_block(fhb) - - # retrieve data blocks and size from header - self.data_blk_nums = fhb.data_blocks[:] - self.data_size = fhb.byte_size - - # scan for extension blocks - next_ext = self.block.extension - while next_ext != 0: - ext_blk = FileListBlock(self.block.blkdev, next_ext) - ext_blk.read() - if not ext_blk.valid: - raise FSError(INVALID_FILE_LIST_BLOCK, block=ext_blk) - self.ext_blk_nums.append(next_ext) - self.ext_blks.append(ext_blk) - self.data_blk_nums += ext_blk.data_blocks - next_ext = ext_blk.extension - - # now check number of ext blocks - self.num_ext_blks = self.calc_number_of_list_blks() - my_num_ext_blks = len(self.ext_blks) - if my_num_ext_blks != self.num_ext_blks: - raise FSError(FILE_LIST_BLOCK_COUNT_MISMATCH, node=self, extra="got=%d want=%d" % (my_num_ext_blks, self.num_ext_blks)) - - # now check number of data blocks - self.num_data_blks = self.calc_number_of_data_blks() - my_num_data_blks = len(self.data_blk_nums) - if my_num_data_blks != self.num_data_blks: - raise FSError(FILE_DATA_BLOCK_COUNT_MISMATCH, node=self, extra="got=%d want=%d" % (my_num_data_blks, self.num_data_blks)) - - # calc number of total blocks occupied by this file - self.total_blks = 1 + my_num_ext_blks + my_num_data_blks - if self.block.comment_block_id != 0: - self.total_blks += 1 - - return fhb - - def read(self): - """read data blocks""" - self.data_blks = [] - want_seq_num = 1 - total_size = 0 - is_ffs = self.volume.is_ffs - byte_size = self.block.byte_size - data = bytearray() - for blk in self.data_blk_nums: - if is_ffs: - # ffs has raw data blocks - dat_blk = self.volume.blkdev.read_block(blk) - total_size += len(dat_blk) - # shrink last read if necessary - if total_size > byte_size: - shrink = total_size - byte_size - dat_blk = dat_blk[:-shrink] - total_size = byte_size - data += dat_blk - else: - # ofs - dat_blk = FileDataBlock(self.block.blkdev, blk) - dat_blk.read() - if not dat_blk.valid: - raise FSError(INVALID_FILE_DATA_BLOCK, 
block=dat_blk, node=self) - # check sequence number - if dat_blk.seq_num != want_seq_num: - raise FSError(INVALID_SEQ_NUM, block=dat_blk, node=self, extra="got=%d wanted=%d" % (dat_blk.seq_num, want_seq_num)) - # store data blocks - self.data_blks.append(dat_blk) - total_size += dat_blk.data_size - data += dat_blk.get_block_data() - want_seq_num += 1 - # store full contents of file - self.data = data - # make sure all went well - got_size = len(data) - want_size = self.block.byte_size - if got_size != want_size: - raise FSError(INTERNAL_ERROR, block=self.block, node=self, extra="file size mismatch: got=%d want=%d" % (got_size, want_size)) - - def get_file_data(self): - if self.data != None: - return self.data - self.read() - return self.data - - def flush(self): - self.data = None - self.data_blks = None - - def ensure_data(self): - if self.data == None: - self.read() - - def set_file_data(self, data): - self.data = data - self.data_size = len(data) - self.num_data_blks = self.calc_number_of_data_blks() - self.num_ext_blks = self.calc_number_of_list_blks() - - def get_data_block_contents_bytes(self): - """how many bytes of file data can be stored in a block?""" - bb = self.volume.blkdev.block_bytes - if self.volume.is_ffs: - return bb - else: - return bb - 24 - - def calc_number_of_data_blks(self): - """given the file size: how many data blocks do we need to store the file?""" - bb = self.get_data_block_contents_bytes() - ds = self.data_size - return (ds + bb -1 ) // bb - - def calc_number_of_list_blks(self): - """given the file size: how many list blocks do we need to store the data blk ptrs?""" - db = self.calc_number_of_data_blks() - # ptr per block - ppb = self.volume.blkdev.block_longs - 56 - # fits in header block? 
- if db <= ppb: - return 0 - else: - db -= ppb - return (db + ppb - 1) // ppb - - def blocks_get_create_num(self): - # determine number of blocks to create - return 1 + self.num_data_blks + self.num_ext_blks - - def blocks_create_new(self, free_blks, name, hash_chain_blk, parent_blk, meta_info): - # assign block numbers - fhb_num = free_blks[0] - # ... for ext - self.ext_blk_nums = [] - for i in range(self.num_ext_blks): - self.ext_blk_nums.append(free_blks[1+i]) - # ... for data - off = 1 + self.num_ext_blks - self.data_blk_nums = [] - for i in range(self.num_data_blks): - self.data_blk_nums.append(free_blks[off]) - off += 1 - - ppb = self.volume.blkdev.block_longs - 56 # data pointer per block - - # create file header block - fhb = FileHeaderBlock(self.blkdev, fhb_num, self.volume.is_longname) - byte_size = len(self.data) - if self.num_data_blks > ppb: - hdr_blks = self.data_blk_nums[0:ppb] - hdr_ext = self.ext_blk_nums[0] - else: - hdr_blks = self.data_blk_nums - hdr_ext = 0 - - fhb.create(parent_blk, name, hdr_blks, hdr_ext, byte_size, - meta_info.get_protect(), meta_info.get_comment(), - meta_info.get_mod_ts(), hash_chain_blk) - fhb.write() - self.set_block(fhb) - - # create file list (=ext) blocks - ext_off = ppb - for i in range(self.num_ext_blks): - flb = FileListBlock(self.blkdev, self.ext_blk_nums[i]) - if i == self.num_ext_blks - 1: - ext_blk = 0 - blks = self.data_blk_nums[ext_off:] - else: - ext_blk = self.ext_blk_nums[i+1] - blks = self.data_blk_nums[ext_off:ext_off+ppb] - flb.create(fhb_num, blks, ext_blk) - flb.write() - self.ext_blks.append(flb) - ext_off += ppb - - # write data blocks - self.write() - - self.valid = True - return fhb_num - - def write(self): - self.data_blks = [] - off = 0 - left = self.data_size - blk_idx = 0 - bs = self.get_data_block_contents_bytes() - is_ffs = self.volume.is_ffs - while off < self.data_size: - # number of data block - blk_num = self.data_blk_nums[blk_idx] - # extract file data - size = left - if size > bs: - 
size = bs - d = self.data[off:off+size] - if is_ffs: - # pad block - if size < bs: - d += b'\0' * (bs-size) - # write raw block data in FFS - self.blkdev.write_block(blk_num, d) - else: - # old FS: create and write data block - fdb = FileDataBlock(self.blkdev, blk_num) - if blk_idx == self.num_data_blks - 1: - next_data = 0 + def __init__(self, volume, parent): + ADFSNode.__init__(self, volume, parent) + # state + self.ext_blk_nums = [] + self.ext_blks = [] + self.data_blk_nums = [] + self.data_blks = [] + self.valid = False + self.data = None + self.data_size = 0 + self.total_blks = 0 + + def __repr__(self): + return "[File(%d)'%s':%d]" % ( + self.block.blk_num, + self.block.name, + self.block.byte_size, + ) + + def blocks_create_old(self, anon_blk): + # create file header block + fhb = FileHeaderBlock(self.blkdev, anon_blk.blk_num, self.volume.is_longname) + fhb.set(anon_blk.data) + if not fhb.valid: + raise FSError(INVALID_FILE_HEADER_BLOCK, block=anon_blk) + self.set_block(fhb) + + # retrieve data blocks and size from header + self.data_blk_nums = fhb.data_blocks[:] + self.data_size = fhb.byte_size + + # scan for extension blocks + next_ext = self.block.extension + while next_ext != 0: + ext_blk = FileListBlock(self.block.blkdev, next_ext) + ext_blk.read() + if not ext_blk.valid: + raise FSError(INVALID_FILE_LIST_BLOCK, block=ext_blk) + self.ext_blk_nums.append(next_ext) + self.ext_blks.append(ext_blk) + self.data_blk_nums += ext_blk.data_blocks + next_ext = ext_blk.extension + + # now check number of ext blocks + self.num_ext_blks = self.calc_number_of_list_blks() + my_num_ext_blks = len(self.ext_blks) + if my_num_ext_blks != self.num_ext_blks: + raise FSError( + FILE_LIST_BLOCK_COUNT_MISMATCH, + node=self, + extra="got=%d want=%d" % (my_num_ext_blks, self.num_ext_blks), + ) + + # now check number of data blocks + self.num_data_blks = self.calc_number_of_data_blks() + my_num_data_blks = len(self.data_blk_nums) + if my_num_data_blks != self.num_data_blks: + 
raise FSError( + FILE_DATA_BLOCK_COUNT_MISMATCH, + node=self, + extra="got=%d want=%d" % (my_num_data_blks, self.num_data_blks), + ) + + # calc number of total blocks occupied by this file + self.total_blks = 1 + my_num_ext_blks + my_num_data_blks + if self.block.comment_block_id != 0: + self.total_blks += 1 + + return fhb + + def read(self): + """read data blocks""" + self.data_blks = [] + want_seq_num = 1 + total_size = 0 + is_ffs = self.volume.is_ffs + byte_size = self.block.byte_size + data = bytearray() + for blk in self.data_blk_nums: + if is_ffs: + # ffs has raw data blocks + dat_blk = self.volume.blkdev.read_block(blk) + total_size += len(dat_blk) + # shrink last read if necessary + if total_size > byte_size: + shrink = total_size - byte_size + dat_blk = dat_blk[:-shrink] + total_size = byte_size + data += dat_blk + else: + # ofs + dat_blk = FileDataBlock(self.block.blkdev, blk) + dat_blk.read() + if not dat_blk.valid: + raise FSError(INVALID_FILE_DATA_BLOCK, block=dat_blk, node=self) + # check sequence number + if dat_blk.seq_num != want_seq_num: + raise FSError( + INVALID_SEQ_NUM, + block=dat_blk, + node=self, + extra="got=%d wanted=%d" % (dat_blk.seq_num, want_seq_num), + ) + # store data blocks + self.data_blks.append(dat_blk) + total_size += dat_blk.data_size + data += dat_blk.get_block_data() + want_seq_num += 1 + # store full contents of file + self.data = data + # make sure all went well + got_size = len(data) + want_size = self.block.byte_size + if got_size != want_size: + raise FSError( + INTERNAL_ERROR, + block=self.block, + node=self, + extra="file size mismatch: got=%d want=%d" % (got_size, want_size), + ) + + def get_file_data(self): + if self.data != None: + return self.data + self.read() + return self.data + + def flush(self): + self.data = None + self.data_blks = None + + def ensure_data(self): + if self.data == None: + self.read() + + def set_file_data(self, data): + self.data = data + self.data_size = len(data) + self.num_data_blks = 
self.calc_number_of_data_blks() + self.num_ext_blks = self.calc_number_of_list_blks() + + def get_data_block_contents_bytes(self): + """how many bytes of file data can be stored in a block?""" + bb = self.volume.blkdev.block_bytes + if self.volume.is_ffs: + return bb + else: + return bb - 24 + + def calc_number_of_data_blks(self): + """given the file size: how many data blocks do we need to store the file?""" + bb = self.get_data_block_contents_bytes() + ds = self.data_size + return (ds + bb - 1) // bb + + def calc_number_of_list_blks(self): + """given the file size: how many list blocks do we need to store the data blk ptrs?""" + db = self.calc_number_of_data_blks() + # ptr per block + ppb = self.volume.blkdev.block_longs - 56 + # fits in header block? + if db <= ppb: + return 0 else: - next_data = self.data_blk_nums[blk_idx+1] - fdb.create(self.block.blk_num, blk_idx+1, d, next_data) - fdb.write() - self.data_blks.append(fdb) - blk_idx += 1 - off += bs - left -= bs - - def draw_on_bitmap(self, bm, show_all=False, first=False): - bm[self.block.blk_num] = 'H' - for b in self.ext_blk_nums: - bm[b] = 'E' - for b in self.data_blk_nums: - bm[b] = 'd' - - def get_block_nums(self): - result = [self.block.blk_num] - result += self.ext_blk_nums - result += self.data_blk_nums - return result - - def get_blocks(self, with_data=True): - result = [self.block] - result += self.ext_blks - if with_data: - self.ensure_data() - result += self.data_blks - return result - - def can_delete(self): - return True - - def get_size(self): - return self.data_size - - def get_size_str(self): - return "%8d" % self.data_size - - def get_detail_str(self): - return "data=%d ext=%d" % (len(self.data_blk_nums), len(self.ext_blk_nums)) - - def get_block_usage(self, all=False, first=True): - return (len(self.data_blk_nums), len(self.ext_blk_nums) + 1) - - def get_file_bytes(self, all=False, first=True): - return self.data_size - - def is_file(self): - return True + db -= ppb + return (db + ppb - 1) 
// ppb + + def blocks_get_create_num(self): + # determine number of blocks to create + return 1 + self.num_data_blks + self.num_ext_blks + + def blocks_create_new(self, free_blks, name, hash_chain_blk, parent_blk, meta_info): + # assign block numbers + fhb_num = free_blks[0] + # ... for ext + self.ext_blk_nums = [] + for i in range(self.num_ext_blks): + self.ext_blk_nums.append(free_blks[1 + i]) + # ... for data + off = 1 + self.num_ext_blks + self.data_blk_nums = [] + for i in range(self.num_data_blks): + self.data_blk_nums.append(free_blks[off]) + off += 1 + + ppb = self.volume.blkdev.block_longs - 56 # data pointer per block + + # create file header block + fhb = FileHeaderBlock(self.blkdev, fhb_num, self.volume.is_longname) + byte_size = len(self.data) + if self.num_data_blks > ppb: + hdr_blks = self.data_blk_nums[0:ppb] + hdr_ext = self.ext_blk_nums[0] + else: + hdr_blks = self.data_blk_nums + hdr_ext = 0 + + fhb.create( + parent_blk, + name, + hdr_blks, + hdr_ext, + byte_size, + meta_info.get_protect(), + meta_info.get_comment(), + meta_info.get_mod_ts(), + hash_chain_blk, + ) + fhb.write() + self.set_block(fhb) + + # create file list (=ext) blocks + ext_off = ppb + for i in range(self.num_ext_blks): + flb = FileListBlock(self.blkdev, self.ext_blk_nums[i]) + if i == self.num_ext_blks - 1: + ext_blk = 0 + blks = self.data_blk_nums[ext_off:] + else: + ext_blk = self.ext_blk_nums[i + 1] + blks = self.data_blk_nums[ext_off : ext_off + ppb] + flb.create(fhb_num, blks, ext_blk) + flb.write() + self.ext_blks.append(flb) + ext_off += ppb + + # write data blocks + self.write() + + self.valid = True + return fhb_num + + def write(self): + self.data_blks = [] + off = 0 + left = self.data_size + blk_idx = 0 + bs = self.get_data_block_contents_bytes() + is_ffs = self.volume.is_ffs + while off < self.data_size: + # number of data block + blk_num = self.data_blk_nums[blk_idx] + # extract file data + size = left + if size > bs: + size = bs + d = self.data[off : off + size] + 
if is_ffs: + # pad block + if size < bs: + d += b"\0" * (bs - size) + # write raw block data in FFS + self.blkdev.write_block(blk_num, d) + else: + # old FS: create and write data block + fdb = FileDataBlock(self.blkdev, blk_num) + if blk_idx == self.num_data_blks - 1: + next_data = 0 + else: + next_data = self.data_blk_nums[blk_idx + 1] + fdb.create(self.block.blk_num, blk_idx + 1, d, next_data) + fdb.write() + self.data_blks.append(fdb) + blk_idx += 1 + off += bs + left -= bs + + def draw_on_bitmap(self, bm, show_all=False, first=False): + bm[self.block.blk_num] = "H" + for b in self.ext_blk_nums: + bm[b] = "E" + for b in self.data_blk_nums: + bm[b] = "d" + + def get_block_nums(self): + result = [self.block.blk_num] + result += self.ext_blk_nums + result += self.data_blk_nums + return result + + def get_blocks(self, with_data=True): + result = [self.block] + result += self.ext_blks + if with_data: + self.ensure_data() + result += self.data_blks + return result + + def can_delete(self): + return True + + def get_size(self): + return self.data_size + + def get_size_str(self): + return "%8d" % self.data_size + + def get_detail_str(self): + return "data=%d ext=%d" % (len(self.data_blk_nums), len(self.ext_blk_nums)) + + def get_block_usage(self, all=False, first=True): + return (len(self.data_blk_nums), len(self.ext_blk_nums) + 1) + + def get_file_bytes(self, all=False, first=True): + return self.data_size + + def is_file(self): + return True diff --git a/amitools/fs/ADFSNode.py b/amitools/fs/ADFSNode.py index 6731ea66..c2959699 100644 --- a/amitools/fs/ADFSNode.py +++ b/amitools/fs/ADFSNode.py @@ -1,6 +1,3 @@ - - - from .block.CommentBlock import CommentBlock from .block.EntryBlock import EntryBlock from .FileName import FileName @@ -11,206 +8,228 @@ from .FSString import FSString import amitools.util.ByteSize as ByteSize + class ADFSNode: - def __init__(self, volume, parent): - self.volume = volume - self.blkdev = volume.blkdev - self.parent = parent - 
self.block_bytes = self.blkdev.block_bytes - self.block = None - self.name = None - self.valid = False - self.meta_info = None - - def __str__(self): - return "%s:'%s'(@%d)" % (self.__class__.__name__, self.get_node_path_name(), self.block.blk_num) - - def set_block(self, block): - self.block = block - self.name = FileName(self.block.name, is_intl=self.volume.is_intl,is_longname=self.volume.is_longname) - self.valid = True - self.create_meta_info() - - def create_meta_info(self): - if self.block.comment_block_id != 0: - comment_block = CommentBlock(self.blkdev, self.block.comment_block_id) - comment_block.read() - comment = comment_block.comment - else: - comment = self.block.comment - self.meta_info = MetaInfo(self.block.protect, self.block.mod_ts, comment) - - def get_file_name(self): - return self.name - - def delete(self, wipe=False, all=False, update_ts=True): - if all: - self.delete_children(wipe, all, update_ts) - self.parent._delete(self, wipe, update_ts) - - def delete_children(self, wipe, all, update_ts): - pass - - def get_meta_info(self): - return self.meta_info - - def change_meta_info(self, meta_info): - dirty = False - - # dircache? 
- rebuild_dircache = False - if self.volume.is_dircache and self.parent: - record = self.parent.get_dircache_record(self.name.get_name()) - if not record: - raise FSError(INTERNAL_ERROR, node=self, extra="dc not found!") - else: - record = None - - # alter protect flags - protect = meta_info.get_protect() - if protect and hasattr(self.block, 'protect'): - self.block.protect = protect - self.meta_info.set_protect(protect) - dirty = True - if record: - record.protect = protect - - # alter mod time - mod_ts = meta_info.get_mod_ts() - if mod_ts: - self.block.mod_ts = mod_ts - self.meta_info.set_mod_ts(mod_ts) - dirty = True - if record: - record.mod_ts = mod_ts - - # alter comment - comment = meta_info.get_comment() - if comment and hasattr(self.block, "comment"): - if EntryBlock.needs_extra_comment_block(self.name, comment): - if self.block.comment_block_id == 0: - # Allocate and initialize extra block for comment - blks = self.volume.bitmap.alloc_n(1) - if blks is not None: - cblk = CommentBlock(self.blkdev, blks[0]) - cblk.create(self.block.blk_num) - self.block.comment_block_id = cblk.blk_num - else: - raise FSError(NO_FREE_BLOCKS, node=self) - else: - cblk = CommentBlock(self.blkdev, self.block.comment_block_id) - cblk.read() - cblk.comment = comment - cblk.write() - else: - self.block.comment = comment + def __init__(self, volume, parent): + self.volume = volume + self.blkdev = volume.blkdev + self.parent = parent + self.block_bytes = self.blkdev.block_bytes + self.block = None + self.name = None + self.valid = False + self.meta_info = None + + def __str__(self): + return "%s:'%s'(@%d)" % ( + self.__class__.__name__, + self.get_node_path_name(), + self.block.blk_num, + ) + + def set_block(self, block): + self.block = block + self.name = FileName( + self.block.name, + is_intl=self.volume.is_intl, + is_longname=self.volume.is_longname, + ) + self.valid = True + self.create_meta_info() + + def create_meta_info(self): if self.block.comment_block_id != 0: - 
self.volume.bitmap.dealloc_n([self.block.comment_block_id]) - self.block.comment_block_id = 0 - - self.meta_info.set_comment(comment) - dirty = True - if record: - rebuild_dircache = len(record.comment) < comment - record.comment = comment - - # really need update? - if dirty: - self.block.write() - # dirache update - if record: - self.parent.update_dircache_record(record, rebuild_dircache) - - def change_comment(self, comment): - self.change_meta_info(MetaInfo(comment=comment)) - - def change_protect(self, protect): - self.change_meta_info(MetaInfo(protect=protect)) - - def change_protect_by_string(self, pr_str): - p = ProtectFlags() - p.parse(pr_str) - self.change_protect(p.mask) - - def change_mod_ts(self, mod_ts): - self.change_meta_info(MetaInfo(mod_ts=mod_ts)) - - def change_mod_ts_by_string(self, tm_str): - t = TimeStamp() - t.parse(tm_str) - self.change_meta_info(MetaInfo(mod_ts=t)) - - def get_list_str(self, indent=0, all=False, detail=False): - istr = ' ' * indent - if detail: - extra = self.get_detail_str() - else: - extra = self.meta_info.get_str_line() - return '%-40s %8s %s' % (istr + self.name.get_unicode_name(), self.get_size_str(), extra) - - def list(self, indent=0, all=False, detail=False, encoding="UTF-8"): - print(self.get_list_str(indent=indent, all=all, detail=detail)) - - def get_size_str(self): - # re-implemented in derived classes! - return "" - - def get_blocks(self, with_data=False): - # re-implemented in derived classes! 
- return 0 - - def get_file_data(self): - return None - - def dump_blocks(self, with_data=False): - blks = self.get_blocks(with_data) - for b in blks: - b.dump() - - def get_node_path(self, with_vol=False): - if self.parent != None: - if not with_vol and self.parent.parent == None: - r = [] - else: - r = self.parent.get_node_path() - else: - if not with_vol: - return [] - r = [] - r.append(self.name.get_unicode_name()) - return r - - def get_node_path_name(self, with_vol=False): - r = self.get_node_path() - return FSString("/".join(r)) - - def get_detail_str(self): - return "" - - def get_block_usage(self, all=False, first=True): - return (0,0) - - def get_file_bytes(self, all=False, first=True): - return (0,0) - - def is_file(self): - return False - - def is_dir(self): - return False - - def get_info(self, all=False): - # block usage: data + fs blocks - (data,fs) = self.get_block_usage(all=all) - total = data + fs - bb = self.blkdev.block_bytes - btotal = total * bb - bdata = data * bb - bfs = fs * bb - prc_data = 10000 * data / total - prc_fs = 10000 - prc_data - res = [] - res.append("sum: %10d %s %12d" % (total, ByteSize.to_byte_size_str(btotal), btotal)) - res.append("data: %10d %s %12d %5.2f%%" % (data, ByteSize.to_byte_size_str(bdata), bdata, prc_data / 100.0)) - res.append("fs: %10d %s %12d %5.2f%%" % (fs, ByteSize.to_byte_size_str(bfs), bfs, prc_fs / 100.0)) - return res + comment_block = CommentBlock(self.blkdev, self.block.comment_block_id) + comment_block.read() + comment = comment_block.comment + else: + comment = self.block.comment + self.meta_info = MetaInfo(self.block.protect, self.block.mod_ts, comment) + + def get_file_name(self): + return self.name + + def delete(self, wipe=False, all=False, update_ts=True): + if all: + self.delete_children(wipe, all, update_ts) + self.parent._delete(self, wipe, update_ts) + + def delete_children(self, wipe, all, update_ts): + pass + + def get_meta_info(self): + return self.meta_info + + def 
change_meta_info(self, meta_info): + dirty = False + + # dircache? + rebuild_dircache = False + if self.volume.is_dircache and self.parent: + record = self.parent.get_dircache_record(self.name.get_name()) + if not record: + raise FSError(INTERNAL_ERROR, node=self, extra="dc not found!") + else: + record = None + + # alter protect flags + protect = meta_info.get_protect() + if protect and hasattr(self.block, "protect"): + self.block.protect = protect + self.meta_info.set_protect(protect) + dirty = True + if record: + record.protect = protect + + # alter mod time + mod_ts = meta_info.get_mod_ts() + if mod_ts: + self.block.mod_ts = mod_ts + self.meta_info.set_mod_ts(mod_ts) + dirty = True + if record: + record.mod_ts = mod_ts + + # alter comment + comment = meta_info.get_comment() + if comment and hasattr(self.block, "comment"): + if EntryBlock.needs_extra_comment_block(self.name, comment): + if self.block.comment_block_id == 0: + # Allocate and initialize extra block for comment + blks = self.volume.bitmap.alloc_n(1) + if blks is not None: + cblk = CommentBlock(self.blkdev, blks[0]) + cblk.create(self.block.blk_num) + self.block.comment_block_id = cblk.blk_num + else: + raise FSError(NO_FREE_BLOCKS, node=self) + else: + cblk = CommentBlock(self.blkdev, self.block.comment_block_id) + cblk.read() + cblk.comment = comment + cblk.write() + else: + self.block.comment = comment + if self.block.comment_block_id != 0: + self.volume.bitmap.dealloc_n([self.block.comment_block_id]) + self.block.comment_block_id = 0 + + self.meta_info.set_comment(comment) + dirty = True + if record: + rebuild_dircache = len(record.comment) < comment + record.comment = comment + + # really need update? 
+ if dirty: + self.block.write() + # dirache update + if record: + self.parent.update_dircache_record(record, rebuild_dircache) + + def change_comment(self, comment): + self.change_meta_info(MetaInfo(comment=comment)) + + def change_protect(self, protect): + self.change_meta_info(MetaInfo(protect=protect)) + + def change_protect_by_string(self, pr_str): + p = ProtectFlags() + p.parse(pr_str) + self.change_protect(p.mask) + + def change_mod_ts(self, mod_ts): + self.change_meta_info(MetaInfo(mod_ts=mod_ts)) + + def change_mod_ts_by_string(self, tm_str): + t = TimeStamp() + t.parse(tm_str) + self.change_meta_info(MetaInfo(mod_ts=t)) + + def get_list_str(self, indent=0, all=False, detail=False): + istr = " " * indent + if detail: + extra = self.get_detail_str() + else: + extra = self.meta_info.get_str_line() + return "%-40s %8s %s" % ( + istr + self.name.get_unicode_name(), + self.get_size_str(), + extra, + ) + + def list(self, indent=0, all=False, detail=False, encoding="UTF-8"): + print(self.get_list_str(indent=indent, all=all, detail=detail)) + + def get_size_str(self): + # re-implemented in derived classes! + return "" + + def get_blocks(self, with_data=False): + # re-implemented in derived classes! 
+ return 0 + + def get_file_data(self): + return None + + def dump_blocks(self, with_data=False): + blks = self.get_blocks(with_data) + for b in blks: + b.dump() + + def get_node_path(self, with_vol=False): + if self.parent != None: + if not with_vol and self.parent.parent == None: + r = [] + else: + r = self.parent.get_node_path() + else: + if not with_vol: + return [] + r = [] + r.append(self.name.get_unicode_name()) + return r + + def get_node_path_name(self, with_vol=False): + r = self.get_node_path() + return FSString("/".join(r)) + + def get_detail_str(self): + return "" + + def get_block_usage(self, all=False, first=True): + return (0, 0) + + def get_file_bytes(self, all=False, first=True): + return (0, 0) + + def is_file(self): + return False + + def is_dir(self): + return False + + def get_info(self, all=False): + # block usage: data + fs blocks + (data, fs) = self.get_block_usage(all=all) + total = data + fs + bb = self.blkdev.block_bytes + btotal = total * bb + bdata = data * bb + bfs = fs * bb + prc_data = 10000 * data / total + prc_fs = 10000 - prc_data + res = [] + res.append( + "sum: %10d %s %12d" + % (total, ByteSize.to_byte_size_str(btotal), btotal) + ) + res.append( + "data: %10d %s %12d %5.2f%%" + % (data, ByteSize.to_byte_size_str(bdata), bdata, prc_data / 100.0) + ) + res.append( + "fs: %10d %s %12d %5.2f%%" + % (fs, ByteSize.to_byte_size_str(bfs), bfs, prc_fs / 100.0) + ) + return res diff --git a/amitools/fs/ADFSVolDir.py b/amitools/fs/ADFSVolDir.py index 538c3148..ce74bb5d 100644 --- a/amitools/fs/ADFSVolDir.py +++ b/amitools/fs/ADFSVolDir.py @@ -1,39 +1,40 @@ - - - from .ADFSDir import ADFSDir from .MetaInfo import MetaInfo from . 
import DosType -class ADFSVolDir(ADFSDir): - def __init__(self, volume, root_block): - ADFSDir.__init__(self, volume, None) - self.set_block(root_block) - self._init_name_hash() - - def __repr__(self): - return "[VolDir(%d)'%s':%s]" % (self.block.blk_num, self.block.name, self.entries) - - def draw_on_bitmap(self, bm, show_all=False, first=True): - blk_num = self.block.blk_num - bm[blk_num] = 'V' - if show_all or first: - self.ensure_entries() - for e in self.entries: - e.draw_on_bitmap(bm, show_all, False) - - def get_size_str(self): - return "VOLUME" - - def create_meta_info(self): - self.meta_info = MetaInfo(mod_ts=self.block.mod_ts) - - def can_delete(self): - return False - - def get_list_str(self, indent=0, all=False, detail=False): - a = ADFSDir.get_list_str(self, indent=indent, all=all, detail=detail) - a += DosType.get_dos_type_str(self.volume.get_dos_type()) - a += " #%d" % self.block_bytes - return a +class ADFSVolDir(ADFSDir): + def __init__(self, volume, root_block): + ADFSDir.__init__(self, volume, None) + self.set_block(root_block) + self._init_name_hash() + + def __repr__(self): + return "[VolDir(%d)'%s':%s]" % ( + self.block.blk_num, + self.block.name, + self.entries, + ) + + def draw_on_bitmap(self, bm, show_all=False, first=True): + blk_num = self.block.blk_num + bm[blk_num] = "V" + if show_all or first: + self.ensure_entries() + for e in self.entries: + e.draw_on_bitmap(bm, show_all, False) + + def get_size_str(self): + return "VOLUME" + + def create_meta_info(self): + self.meta_info = MetaInfo(mod_ts=self.block.mod_ts) + + def can_delete(self): + return False + + def get_list_str(self, indent=0, all=False, detail=False): + a = ADFSDir.get_list_str(self, indent=indent, all=all, detail=detail) + a += DosType.get_dos_type_str(self.volume.get_dos_type()) + a += " #%d" % self.block_bytes + return a diff --git a/amitools/fs/ADFSVolume.py b/amitools/fs/ADFSVolume.py index dbbd8d43..83414a31 100644 --- a/amitools/fs/ADFSVolume.py +++ 
b/amitools/fs/ADFSVolume.py @@ -1,6 +1,3 @@ - - - from .block.BootBlock import BootBlock from .block.RootBlock import RootBlock from .ADFSVolDir import ADFSVolDir @@ -13,342 +10,371 @@ from . import DosType import amitools.util.ByteSize as ByteSize + class ADFSVolume: - - def __init__(self, blkdev): - self.blkdev = blkdev - - self.boot = None - self.root = None - self.root_dir = None - self.bitmap = None - - self.valid = False - self.is_ffs = None - self.is_intl = None - self.is_dircache = None - self.is_longname = None - self.name = None - self.meta_info = None - - def open(self): - # read boot block - self.boot = BootBlock(self.blkdev) - self.boot.read() - # valid root block? - if self.boot.valid: - # get fs flags - dos_type = self.boot.dos_type - self.is_ffs = DosType.is_ffs(dos_type) - self.is_intl = DosType.is_intl(dos_type) - self.is_dircache = DosType.is_dircache(dos_type) - self.is_longname = DosType.is_longname(dos_type) - # read root - self.root = RootBlock(self.blkdev, self.boot.calc_root_blk) - self.root.read() - if self.root.valid: - self.name = self.root.name - # build meta info - self.meta_info = RootMetaInfo( self.root.create_ts, self.root.disk_ts, self.root.mod_ts ) - # create root dir - self.root_dir = ADFSVolDir(self, self.root) - self.root_dir.read() + def __init__(self, blkdev): + self.blkdev = blkdev + + self.boot = None + self.root = None + self.root_dir = None + self.bitmap = None + + self.valid = False + self.is_ffs = None + self.is_intl = None + self.is_dircache = None + self.is_longname = None + self.name = None + self.meta_info = None + + def open(self): + # read boot block + self.boot = BootBlock(self.blkdev) + self.boot.read() + # valid root block? 
+ if self.boot.valid: + # get fs flags + dos_type = self.boot.dos_type + self.is_ffs = DosType.is_ffs(dos_type) + self.is_intl = DosType.is_intl(dos_type) + self.is_dircache = DosType.is_dircache(dos_type) + self.is_longname = DosType.is_longname(dos_type) + # read root + self.root = RootBlock(self.blkdev, self.boot.calc_root_blk) + self.root.read() + if self.root.valid: + self.name = self.root.name + # build meta info + self.meta_info = RootMetaInfo( + self.root.create_ts, self.root.disk_ts, self.root.mod_ts + ) + # create root dir + self.root_dir = ADFSVolDir(self, self.root) + self.root_dir.read() + # create bitmap + self.bitmap = ADFSBitmap(self.root) + self.bitmap.read() + self.valid = True + else: + raise FSError(INVALID_ROOT_BLOCK, block=self.root) + else: + raise FSError(INVALID_BOOT_BLOCK, block=self.boot) + + def create( + self, + name, + meta_info=None, + dos_type=None, + boot_code=None, + is_ffs=False, + is_intl=False, + is_dircache=False, + is_longname=False, + ): + # determine dos_type + if dos_type == None: + dos_type = DosType.DOS0 + if is_longname: + dos_type = DosType.DOS6 + elif is_dircache: + dos_type |= DosType.DOS_MASK_DIRCACHE + elif is_intl: + dos_type |= DosType.DOS_MASK_INTL + if is_ffs: + dos_type |= DosType.DOS_MASK_FFS + # update flags + self.is_ffs = DosType.is_ffs(dos_type) + self.is_intl = DosType.is_intl(dos_type) + self.is_dircache = DosType.is_dircache(dos_type) + self.is_longname = DosType.is_longname(dos_type) + # convert and check volume name + if not isinstance(name, FSString): + raise ValueError("create's name must be a FSString") + fn = FileName( + name, is_intl=self.is_intl, is_longname=False + ) # Volumes don't support long names + if not fn.is_valid(): + raise FSError(INVALID_VOLUME_NAME, file_name=name, node=self) + # create a boot block + self.boot = BootBlock(self.blkdev) + self.boot.create(dos_type=dos_type, boot_code=boot_code) + self.boot.write() + # create a root block + self.root = RootBlock(self.blkdev, 
self.boot.calc_root_blk) + if meta_info == None: + meta_info = RootMetaInfo() + meta_info.set_current_as_create_time() + meta_info.set_current_as_mod_time() + meta_info.set_current_as_disk_time() + create_ts = meta_info.get_create_ts() + disk_ts = meta_info.get_disk_ts() + mod_ts = meta_info.get_mod_ts() + self.meta_info = meta_info + self.root.create(fn.get_name(), create_ts, disk_ts, mod_ts, fstype=dos_type) + self.name = name # create bitmap self.bitmap = ADFSBitmap(self.root) - self.bitmap.read() + self.bitmap.create() + self.bitmap.write() # writes root block, too + # create empty root dir + self.root_dir = ADFSVolDir(self, self.root) + self.root_dir.read() + # all ok self.valid = True - else: - raise FSError(INVALID_ROOT_BLOCK, block=self.root) - else: - raise FSError(INVALID_BOOT_BLOCK, block=self.boot) - - def create(self, name, meta_info=None, dos_type=None, boot_code=None, is_ffs=False, is_intl=False, is_dircache=False, is_longname=False): - # determine dos_type - if dos_type == None: - dos_type = DosType.DOS0 - if is_longname: - dos_type = DosType.DOS6 - elif is_dircache: - dos_type |= DosType.DOS_MASK_DIRCACHE - elif is_intl: - dos_type |= DosType.DOS_MASK_INTL - if is_ffs: - dos_type |= DosType.DOS_MASK_FFS - # update flags - self.is_ffs = DosType.is_ffs(dos_type) - self.is_intl = DosType.is_intl(dos_type) - self.is_dircache = DosType.is_dircache(dos_type) - self.is_longname = DosType.is_longname(dos_type) - # convert and check volume name - if not isinstance(name, FSString): - raise ValueError("create's name must be a FSString") - fn = FileName(name, is_intl=self.is_intl, is_longname=False) # Volumes don't support long names - if not fn.is_valid(): - raise FSError(INVALID_VOLUME_NAME, file_name=name, node=self) - # create a boot block - self.boot = BootBlock(self.blkdev) - self.boot.create(dos_type=dos_type, boot_code=boot_code) - self.boot.write() - # create a root block - self.root = RootBlock(self.blkdev, self.boot.calc_root_blk) - if meta_info == 
None: - meta_info = RootMetaInfo() - meta_info.set_current_as_create_time() - meta_info.set_current_as_mod_time() - meta_info.set_current_as_disk_time() - create_ts = meta_info.get_create_ts() - disk_ts = meta_info.get_disk_ts() - mod_ts = meta_info.get_mod_ts() - self.meta_info = meta_info - self.root.create(fn.get_name(), create_ts, disk_ts, mod_ts, fstype=dos_type) - self.name = name - # create bitmap - self.bitmap = ADFSBitmap(self.root) - self.bitmap.create() - self.bitmap.write() # writes root block, too - # create empty root dir - self.root_dir = ADFSVolDir(self, self.root) - self.root_dir.read() - # all ok - self.valid = True - - def close(self): - # flush bitmap state (if it was dirty) - if self.bitmap: - self.bitmap.write() - - def get_info(self): - """return an array of strings with information on the volume""" - res = [] - total = self.get_total_blocks() - free = self.get_free_blocks() - used = total - free - bb = self.blkdev.block_bytes - btotal = total * bb - bfree = free * bb - bused = used * bb - prc_free = 10000 * free / total - prc_used = 10000 - prc_free - res.append("total: %10d %s %12d" % (total, ByteSize.to_byte_size_str(btotal), btotal)) - res.append("used: %10d %s %12d %5.2f%%" % (used, ByteSize.to_byte_size_str(bused), bused, prc_used / 100.0)) - res.append("free: %10d %s %12d %5.2f%%" % (free, ByteSize.to_byte_size_str(bfree), bfree, prc_free / 100.0)) - return res - - # ----- Path Queries ----- - - def get_path_name(self, path_name, allow_file=True, allow_dir=True): - """get node for given path""" - # make sure path name is a FSString - if not isinstance(path_name, FSString): - raise ValueError("get_path_name's path must be a FSString") - # create and check file name - fn = FileName(path_name, is_intl=self.is_intl, is_longname=self.is_longname) - if not fn.is_valid(): - raise FSError(INVALID_FILE_NAME, file_name=path_name, node=self) - # find node - if fn.is_root_path_alias(): - # its the root node - return self.root_dir - else: - # find 
a sub node - path = fn.split_path() - return self.root_dir.get_path(path, allow_file, allow_dir) - - def get_dir_path_name(self, path_name): - """get node for given path and ensure its a directory""" - return self.get_path_name(path_name, allow_file=False) - - def get_file_path_name(self, path_name): - """get node for given path and ensure its a file""" - return self.get_path_name(path_name, allow_dir=False) - - def get_create_path_name(self, path_name, suggest_name=None): - """get a parent node and path name for creation + + def close(self): + # flush bitmap state (if it was dirty) + if self.bitmap: + self.bitmap.write() + + def get_info(self): + """return an array of strings with information on the volume""" + res = [] + total = self.get_total_blocks() + free = self.get_free_blocks() + used = total - free + bb = self.blkdev.block_bytes + btotal = total * bb + bfree = free * bb + bused = used * bb + prc_free = 10000 * free / total + prc_used = 10000 - prc_free + res.append( + "total: %10d %s %12d" + % (total, ByteSize.to_byte_size_str(btotal), btotal) + ) + res.append( + "used: %10d %s %12d %5.2f%%" + % (used, ByteSize.to_byte_size_str(bused), bused, prc_used / 100.0) + ) + res.append( + "free: %10d %s %12d %5.2f%%" + % (free, ByteSize.to_byte_size_str(bfree), bfree, prc_free / 100.0) + ) + return res + + # ----- Path Queries ----- + + def get_path_name(self, path_name, allow_file=True, allow_dir=True): + """get node for given path""" + # make sure path name is a FSString + if not isinstance(path_name, FSString): + raise ValueError("get_path_name's path must be a FSString") + # create and check file name + fn = FileName(path_name, is_intl=self.is_intl, is_longname=self.is_longname) + if not fn.is_valid(): + raise FSError(INVALID_FILE_NAME, file_name=path_name, node=self) + # find node + if fn.is_root_path_alias(): + # its the root node + return self.root_dir + else: + # find a sub node + path = fn.split_path() + return self.root_dir.get_path(path, allow_file, 
allow_dir) + + def get_dir_path_name(self, path_name): + """get node for given path and ensure its a directory""" + return self.get_path_name(path_name, allow_file=False) + + def get_file_path_name(self, path_name): + """get node for given path and ensure its a file""" + return self.get_path_name(path_name, allow_dir=False) + + def get_create_path_name(self, path_name, suggest_name=None): + """get a parent node and path name for creation return: parent_node_or_none, file_name_or_none """ - # make sure input is correct - if not isinstance(path_name, FSString): - raise ValueError("get_create_path_name's path_name must be a FSString") - if suggest_name != None and not isinstance(suggest_name, FSString): - raise ValueError("get_create_path_name's suggest_name must be a FSString") - # is root path? - fn = FileName(path_name, is_intl=self.is_intl, is_longname=self.is_longname) - if not fn.is_valid(): - raise FSError(INVALID_FILE_NAME, file_name=path_name, node=self) - # find node - if fn.is_root_path_alias(): - return self.root_dir, suggest_name - else: - # try to get path_name as a directory - node = self.get_dir_path_name(path_name) - if node != None: - return node, suggest_name - else: - # split into dir and file name - dn, fn = fn.get_dir_and_base_name() - if dn != None: - # has a directory -> try to fetch it - node = self.get_dir_path_name(dn) + # make sure input is correct + if not isinstance(path_name, FSString): + raise ValueError("get_create_path_name's path_name must be a FSString") + if suggest_name != None and not isinstance(suggest_name, FSString): + raise ValueError("get_create_path_name's suggest_name must be a FSString") + # is root path? 
+ fn = FileName(path_name, is_intl=self.is_intl, is_longname=self.is_longname) + if not fn.is_valid(): + raise FSError(INVALID_FILE_NAME, file_name=path_name, node=self) + # find node + if fn.is_root_path_alias(): + return self.root_dir, suggest_name else: - # no dir -> assume root dir - node = self.root_dir - if fn != None: - # take given name - return node, fn + # try to get path_name as a directory + node = self.get_dir_path_name(path_name) + if node != None: + return node, suggest_name + else: + # split into dir and file name + dn, fn = fn.get_dir_and_base_name() + if dn != None: + # has a directory -> try to fetch it + node = self.get_dir_path_name(dn) + else: + # no dir -> assume root dir + node = self.root_dir + if fn != None: + # take given name + return node, fn + else: + # use suggested name + return node, suggest_name + + # ----- convenience API ----- + + def get_volume_name(self): + return self.name + + def get_root_dir(self): + return self.root_dir + + def get_dos_type(self): + return self.boot.dos_type + + def get_boot_code(self): + return self.boot.boot_code + + def get_free_blocks(self): + return self.bitmap.get_num_free() + + def get_used_blocks(self): + free = self.bitmap.get_num_free() + total = self.blkdev.num_blocks + return total - free + + def get_total_blocks(self): + return self.blkdev.num_blocks + + def get_meta_info(self): + return self.meta_info + + def update_disk_time(self): + mi = RootMetaInfo() + mi.set_current_as_disk_time() + self.change_meta_info(mi) + + def change_meta_info(self, meta_info): + if self.root != None and self.root.valid: + dirty = False + # update create_ts + create_ts = meta_info.get_create_ts() + if create_ts != None: + self.root.create_ts = meta_info.get_create_ts() + dirty = True + # update disk_ts + disk_ts = meta_info.get_disk_ts() + if disk_ts != None: + self.root.disk_ts = disk_ts + dirty = True + # update mod_ts + mod_ts = meta_info.get_mod_ts() + if mod_ts != None: + self.root.mod_ts = mod_ts + dirty = 
True + # update if something changed + if dirty: + self.root.write() + self.meta_info = RootMetaInfo( + self.root.create_ts, self.root.disk_ts, self.root.mod_ts + ) + return True else: - # use suggested name - return node, suggest_name - - # ----- convenience API ----- - - def get_volume_name(self): - return self.name - - def get_root_dir(self): - return self.root_dir - - def get_dos_type(self): - return self.boot.dos_type - - def get_boot_code(self): - return self.boot.boot_code - - def get_free_blocks(self): - return self.bitmap.get_num_free() - - def get_used_blocks(self): - free = self.bitmap.get_num_free() - total = self.blkdev.num_blocks - return total - free - - def get_total_blocks(self): - return self.blkdev.num_blocks - - def get_meta_info(self): - return self.meta_info - - def update_disk_time(self): - mi = RootMetaInfo() - mi.set_current_as_disk_time() - self.change_meta_info(mi) - - def change_meta_info(self, meta_info): - if self.root != None and self.root.valid: - dirty = False - # update create_ts - create_ts = meta_info.get_create_ts() - if create_ts != None: - self.root.create_ts = meta_info.get_create_ts() - dirty = True - # update disk_ts - disk_ts = meta_info.get_disk_ts() - if disk_ts != None: - self.root.disk_ts = disk_ts - dirty = True - # update mod_ts - mod_ts = meta_info.get_mod_ts() - if mod_ts != None: - self.root.mod_ts = mod_ts - dirty = True - # update if something changed - if dirty: + return False + + def change_create_ts(self, create_ts): + return self.change_meta_info(RootMetaInfo(create_ts=create_ts)) + + def change_disk_ts(self, disk_ts): + return self.change_meta_info(RootMetaInfo(disk_ts=disk_ts)) + + def change_mod_ts(self, mod_ts): + return self.change_meta_info(RootMetaInfo(mod_ts=mod_ts)) + + def change_create_ts_by_string(self, create_ts_str): + t = TimeStamp() + t.parse(create_ts_str) + return self.change_meta_info(RootMetaInfo(create_ts=t)) + + def change_disk_ts_by_string(self, disk_ts_str): + t = TimeStamp() + 
t.parse(disk_ts_str) + return self.change_meta_info(RootMetaInfo(disk_ts=t)) + + def change_mod_ts_by_string(self, mod_ts_str): + t = TimeStamp() + t.parse(mod_ts_str) + return self.change_meta_info(RootMetaInfo(mod_ts=t)) + + def relabel(self, name): + """Relabel the volume""" + # make sure its a FSString + if not isinstance(name, FSString): + raise ValueError("relabel's name must be a FSString") + # validate file name + fn = FileName(name, is_intl=self.is_intl, is_longname=False) + if not fn.is_valid(): + raise FSError(INVALID_VOLUME_NAME, file_name=name, node=self) + # update root block + ami_name = name.get_ami_str() + self.root.name = ami_name self.root.write() - self.meta_info = RootMetaInfo( self.root.create_ts, self.root.disk_ts, self.root.mod_ts ) - return True - else: - return False - - def change_create_ts(self, create_ts): - return self.change_meta_info(RootMetaInfo(create_ts=create_ts)) - - def change_disk_ts(self, disk_ts): - return self.change_meta_info(RootMetaInfo(disk_ts=disk_ts)) - - def change_mod_ts(self, mod_ts): - return self.change_meta_info(RootMetaInfo(mod_ts=mod_ts)) - - def change_create_ts_by_string(self, create_ts_str): - t = TimeStamp() - t.parse(create_ts_str) - return self.change_meta_info(RootMetaInfo(create_ts=t)) - - def change_disk_ts_by_string(self, disk_ts_str): - t = TimeStamp() - t.parse(disk_ts_str) - return self.change_meta_info(RootMetaInfo(disk_ts=t)) - - def change_mod_ts_by_string(self, mod_ts_str): - t = TimeStamp() - t.parse(mod_ts_str) - return self.change_meta_info(RootMetaInfo(mod_ts=t)) - - def relabel(self, name): - """Relabel the volume""" - # make sure its a FSString - if not isinstance(name, FSString): - raise ValueError("relabel's name must be a FSString") - # validate file name - fn = FileName(name, is_intl=self.is_intl, is_longname=False) - if not fn.is_valid(): - raise FSError(INVALID_VOLUME_NAME, file_name=name, node=self) - # update root block - ami_name = name.get_ami_str() - self.root.name = ami_name 
- self.root.write() - # store internally - self.name = name - self.root_dir.name = name - - def create_dir(self, ami_path): - """Create a new directory""" - # make sure its a FSString - if not isinstance(ami_path, FSString): - raise ValueError("create_dir's ami_path must be a FSString") - # check file path - fn = FileName(ami_path, is_intl=self.is_intl, is_longname=self.is_longname) - if not fn.is_valid(): - raise FSError(INVALID_FILE_NAME, file_name=ami_path) - # split into dir and base name - dir_name, base_name = fn.get_dir_and_base_name() - if base_name == None: - raise FSError(INVALID_FILE_NAME, file_name=ami_path) - # find parent of dir - if dir_name == None: - node = self.root_dir - else: - # no parent dir found - node = self.get_dir_path_name(dir_name) - if node == None: - raise FSError(INVALID_PARENT_DIRECTORY, file_name=ami_path, extra="not found: "+dir_name) - node.create_dir(base_name) - - def write_file(self, data, ami_path, suggest_name=None, cache=False): - """Write given data as a file""" - # get parent node and file_name - parent_node, file_name = self.get_create_path_name(ami_path, suggest_name) - if parent_node == None: - raise FSError(INVALID_PARENT_DIRECTORY, file_name=ami_path) - if file_name == None: - raise FSError(INVALID_FILE_NAME, file_name=file_name) - # create file - node = parent_node.create_file(file_name, data) - if not cache: - node.flush() - - def read_file(self, ami_path, cache=False): - """Read a file and return data""" - # get node of file - node = self.get_file_path_name(ami_path) - if node == None: - raise FSError(FILE_NOT_FOUND, file_name=ami_path) - data = node.get_file_data() - if not cache: - node.flush() - return data - - def delete(self, ami_path, wipe=False, all=False): - """Delete a file or directory at given path""" - node = self.get_path_name(ami_path) - if node == None: - raise FSError(FILE_NOT_FOUND, file_name=ami_path) - node.delete(wipe=wipe, all=all) + # store internally + self.name = name + self.root_dir.name = 
name + + def create_dir(self, ami_path): + """Create a new directory""" + # make sure its a FSString + if not isinstance(ami_path, FSString): + raise ValueError("create_dir's ami_path must be a FSString") + # check file path + fn = FileName(ami_path, is_intl=self.is_intl, is_longname=self.is_longname) + if not fn.is_valid(): + raise FSError(INVALID_FILE_NAME, file_name=ami_path) + # split into dir and base name + dir_name, base_name = fn.get_dir_and_base_name() + if base_name == None: + raise FSError(INVALID_FILE_NAME, file_name=ami_path) + # find parent of dir + if dir_name == None: + node = self.root_dir + else: + # no parent dir found + node = self.get_dir_path_name(dir_name) + if node == None: + raise FSError( + INVALID_PARENT_DIRECTORY, + file_name=ami_path, + extra="not found: " + dir_name, + ) + node.create_dir(base_name) + + def write_file(self, data, ami_path, suggest_name=None, cache=False): + """Write given data as a file""" + # get parent node and file_name + parent_node, file_name = self.get_create_path_name(ami_path, suggest_name) + if parent_node == None: + raise FSError(INVALID_PARENT_DIRECTORY, file_name=ami_path) + if file_name == None: + raise FSError(INVALID_FILE_NAME, file_name=file_name) + # create file + node = parent_node.create_file(file_name, data) + if not cache: + node.flush() + + def read_file(self, ami_path, cache=False): + """Read a file and return data""" + # get node of file + node = self.get_file_path_name(ami_path) + if node == None: + raise FSError(FILE_NOT_FOUND, file_name=ami_path) + data = node.get_file_data() + if not cache: + node.flush() + return data + + def delete(self, ami_path, wipe=False, all=False): + """Delete a file or directory at given path""" + node = self.get_path_name(ami_path) + if node == None: + raise FSError(FILE_NOT_FOUND, file_name=ami_path) + node.delete(wipe=wipe, all=all) diff --git a/amitools/fs/DosType.py b/amitools/fs/DosType.py index 4cc359ca..5307fe99 100644 --- a/amitools/fs/DosType.py +++ 
b/amitools/fs/DosType.py @@ -1,14 +1,14 @@ """Helper functions and constants useable with DosTypes""" # raw dos types -DOS0 = 0x444f5300 -DOS1 = 0x444f5301 -DOS2 = 0x444f5302 -DOS3 = 0x444f5303 -DOS4 = 0x444f5304 -DOS5 = 0x444f5305 -DOS6 = 0x444f5306 -DOS7 = 0x444f5307 +DOS0 = 0x444F5300 +DOS1 = 0x444F5301 +DOS2 = 0x444F5302 +DOS3 = 0x444F5303 +DOS4 = 0x444F5304 +DOS5 = 0x444F5305 +DOS6 = 0x444F5306 +DOS7 = 0x444F5307 # more convenient dos type DOS_OFS = DOS0 @@ -22,14 +22,14 @@ # string names for dos types dos_type_names = [ - 'DOS0:ofs', - 'DOS1:ffs', - 'DOS2:ofs+intl', - 'DOS3:ffs+intl', - 'DOS4:ofs+intl+dircache', - 'DOS5:ffs+intl+dircache', - 'DOS6:ofs+intl+longname', - 'DOS7:ffs+intl+longname' + "DOS0:ofs", + "DOS1:ffs", + "DOS2:ofs+intl", + "DOS3:ffs+intl", + "DOS4:ofs+intl+dircache", + "DOS5:ffs+intl+dircache", + "DOS6:ofs+intl+longname", + "DOS7:ffs+intl+longname", ] # masks for modes @@ -37,103 +37,117 @@ DOS_MASK_INTL = 2 DOS_MASK_DIRCACHE = 4 + def parse_dos_type_str(string): - """parse a dos type string + """parse a dos type string return None if its invalid or dostype value """ - comp = string.split("+") - if "ffs" in comp: - if "dc" in comp or "dircache" in comp: - return DOS_FFS_INTL_DIRCACHE - elif "ln" in comp or "longname" in comp: - return DOS_FFS_INTL_LONGNAME - elif "intl" in comp: - return DOS_FFS_INTL - else: - return DOS_FFS - elif "ofs" in comp: - if "dc" in comp or "dircache" in comp: - return DOS_OFS_INTL_DIRCACHE - elif "ln" in comp or "longname" in comp: - return DOS_OFS_INTL_LONGNAME - elif "intl" in comp: - return DOS_OFS_INTL + comp = string.split("+") + if "ffs" in comp: + if "dc" in comp or "dircache" in comp: + return DOS_FFS_INTL_DIRCACHE + elif "ln" in comp or "longname" in comp: + return DOS_FFS_INTL_LONGNAME + elif "intl" in comp: + return DOS_FFS_INTL + else: + return DOS_FFS + elif "ofs" in comp: + if "dc" in comp or "dircache" in comp: + return DOS_OFS_INTL_DIRCACHE + elif "ln" in comp or "longname" in comp: + return 
DOS_OFS_INTL_LONGNAME + elif "intl" in comp: + return DOS_OFS_INTL + else: + return DOS_OFS else: - return DOS_OFS - else: - n = len(string) - # use 'DOS0' .. 'DOS7' - if n == 4 and string[0:3] == 'DOS': - off = ord(string[3]) - ord('0') - if off >= 0 and off <= 7: - return DOS0 + off - else: - return None - # other tag? - elif string[0].isalpha() and n==4: - return tag_str_to_num(string) - # use '0x01234567' hex value - elif string[0:2] == '0x': - try: - return int(string[2:],16) - except ValueError: - return None - # try number - else: - try: - return int(string) - except ValueError: - return None + n = len(string) + # use 'DOS0' .. 'DOS7' + if n == 4 and string[0:3] == "DOS": + off = ord(string[3]) - ord("0") + if off >= 0 and off <= 7: + return DOS0 + off + else: + return None + # other tag? + elif string[0].isalpha() and n == 4: + return tag_str_to_num(string) + # use '0x01234567' hex value + elif string[0:2] == "0x": + try: + return int(string[2:], 16) + except ValueError: + return None + # try number + else: + try: + return int(string) + except ValueError: + return None + def tag_str_to_num(s): - """Convert the DosType in a 4 letter tag string to 32 bit value""" - if len(s) != 4: - return 0 - a = ord(s[0]) << 24 - b = ord(s[1]) << 16 - c = ord(s[2]) << 8 - l = s[3] - d = ord(l) - if l.isdigit(): - d = d - ord('0') - return a+b+c+d + """Convert the DosType in a 4 letter tag string to 32 bit value""" + if len(s) != 4: + return 0 + a = ord(s[0]) << 24 + b = ord(s[1]) << 16 + c = ord(s[2]) << 8 + l = s[3] + d = ord(l) + if l.isdigit(): + d = d - ord("0") + return a + b + c + d + def num_to_tag_str(l): - """Convert the DosType in a 32 bit value to its 4 letter tag string""" - a = chr((l >> 24) & 0xff) - b = chr((l >> 16) & 0xff) - c = chr((l >> 8) & 0xff) - last = (l & 0xff) - if last < 32: - last = chr(last + 48) - else: - last = chr(last) - return a+b+c+last + """Convert the DosType in a 32 bit value to its 4 letter tag string""" + a = chr((l >> 24) & 0xFF) + b 
= chr((l >> 16) & 0xFF) + c = chr((l >> 8) & 0xFF) + last = l & 0xFF + if last < 32: + last = chr(last + 48) + else: + last = chr(last) + return a + b + c + last + def get_dos_type_str(dos_type): - """return description of dos type as a string""" - return dos_type_names[dos_type & 0x7] + """return description of dos type as a string""" + return dos_type_names[dos_type & 0x7] + def is_valid(dos_type): - """check if its a valid dos type""" - return (dos_type >= DOS0) and (dos_type <= DOS7) + """check if its a valid dos type""" + return (dos_type >= DOS0) and (dos_type <= DOS7) + def is_ffs(dos_type): - """check if its a fast file system dostype""" - return (dos_type & DOS_MASK_FFS) == DOS_MASK_FFS + """check if its a fast file system dostype""" + return (dos_type & DOS_MASK_FFS) == DOS_MASK_FFS + def is_intl(dos_type): - """check if international mode is enabled in dostype""" - return is_dircache(dos_type) or is_longname(dos_type) or (dos_type & DOS_MASK_INTL) == DOS_MASK_INTL + """check if international mode is enabled in dostype""" + return ( + is_dircache(dos_type) + or is_longname(dos_type) + or (dos_type & DOS_MASK_INTL) == DOS_MASK_INTL + ) + def is_dircache(dos_type): - """check if dir cache mode is enabled in dostype""" - return (dos_type == DOS4) or (dos_type == DOS5) + """check if dir cache mode is enabled in dostype""" + return (dos_type == DOS4) or (dos_type == DOS5) + def is_longname(dos_type): - """check if long filename mode is enabled in dostype""" - return (dos_type == DOS6) or (dos_type == DOS7) + """check if long filename mode is enabled in dostype""" + return (dos_type == DOS6) or (dos_type == DOS7) + def rootblock_tracks_used_blocks(dos_type): - """checks if the number of used blocks is stored within the rootblock""" - return (dos_type == DOS6) or (dos_type == DOS7) + """checks if the number of used blocks is stored within the rootblock""" + return (dos_type == DOS6) or (dos_type == DOS7) diff --git a/amitools/fs/FSError.py 
b/amitools/fs/FSError.py index 40d2fb46..8eda7905 100644 --- a/amitools/fs/FSError.py +++ b/amitools/fs/FSError.py @@ -22,52 +22,51 @@ INVALID_VOLUME_NAME = 22 error_names = { - INVALID_BOOT_BLOCK : "Invalid Boot Block", - INVALID_ROOT_BLOCK : "Invalid Root Block", - INVALID_USER_DIR_BLOCK : "Invalid UserDir Block", - INVALID_FILE_HEADER_BLOCK : "Invalid FileHeader Block", - INVALID_FILE_LIST_BLOCK : "Invalid FileList Block", - INVALID_FILE_DATA_BLOCK : "Invalid FileData Block", - NO_FREE_BLOCKS : "No Free Blocks", - UNSUPPORTED_DIR_BLOCK : "Unsupported Dir Block", - INVALID_FILE_NAME : "Invalid File Name", - NAME_ALREADY_EXISTS : "Name already exists", - INVALID_SEQ_NUM : "Invalid Sequence Number", - FILE_LIST_BLOCK_COUNT_MISMATCH : "FileList Block Count Mismatch", - FILE_DATA_BLOCK_COUNT_MISMATCH : "FileData Block Count Mismatch", - INVALID_BITMAP_BLOCK : "Invalid Bitmap Block", - BITMAP_BLOCK_COUNT_MISMATCH : "Bitmap Block Count Mismatch", - BITMAP_SIZE_MISMATCH : "Bitmap Size Mismatch", - DELETE_NOT_ALLOWED : "Delete Not Allowed", - INTERNAL_ERROR : "Internal Error", - INVALID_PROTECT_FORMAT : "Invalid Protect Format", - INVALID_PARENT_DIRECTORY : "Invalid Parent Directory", - FILE_NOT_FOUND : "File not found", - INVALID_VOLUME_NAME : "Invalid volume name" + INVALID_BOOT_BLOCK: "Invalid Boot Block", + INVALID_ROOT_BLOCK: "Invalid Root Block", + INVALID_USER_DIR_BLOCK: "Invalid UserDir Block", + INVALID_FILE_HEADER_BLOCK: "Invalid FileHeader Block", + INVALID_FILE_LIST_BLOCK: "Invalid FileList Block", + INVALID_FILE_DATA_BLOCK: "Invalid FileData Block", + NO_FREE_BLOCKS: "No Free Blocks", + UNSUPPORTED_DIR_BLOCK: "Unsupported Dir Block", + INVALID_FILE_NAME: "Invalid File Name", + NAME_ALREADY_EXISTS: "Name already exists", + INVALID_SEQ_NUM: "Invalid Sequence Number", + FILE_LIST_BLOCK_COUNT_MISMATCH: "FileList Block Count Mismatch", + FILE_DATA_BLOCK_COUNT_MISMATCH: "FileData Block Count Mismatch", + INVALID_BITMAP_BLOCK: "Invalid Bitmap Block", + 
BITMAP_BLOCK_COUNT_MISMATCH: "Bitmap Block Count Mismatch", + BITMAP_SIZE_MISMATCH: "Bitmap Size Mismatch", + DELETE_NOT_ALLOWED: "Delete Not Allowed", + INTERNAL_ERROR: "Internal Error", + INVALID_PROTECT_FORMAT: "Invalid Protect Format", + INVALID_PARENT_DIRECTORY: "Invalid Parent Directory", + FILE_NOT_FOUND: "File not found", + INVALID_VOLUME_NAME: "Invalid volume name", } + class FSError(Exception): - def __init__(self, code, node=None, block=None, file_name=None, extra=None): - self.code = code - self.node = node - self.block = block - self.file_name = file_name - self.extra = extra - - def __str__(self): - if self.code in error_names: - code_str = str(error_names[self.code]) - else: - code_str = "?" - srcs = [] - if self.node != None: - srcs.append("node=" + str(self.node)) - if self.block != None: - srcs.append("block=" + str(self.block)) - if self.file_name != None: - srcs.append("file_name=" + self.file_name.get_unicode()) - if self.extra != None: - srcs.append(str(self.extra)) - return "%s(%d):%s" % (code_str, self.code, ",".join(srcs)) + def __init__(self, code, node=None, block=None, file_name=None, extra=None): + self.code = code + self.node = node + self.block = block + self.file_name = file_name + self.extra = extra - \ No newline at end of file + def __str__(self): + if self.code in error_names: + code_str = str(error_names[self.code]) + else: + code_str = "?" 
+ srcs = [] + if self.node != None: + srcs.append("node=" + str(self.node)) + if self.block != None: + srcs.append("block=" + str(self.block)) + if self.file_name != None: + srcs.append("file_name=" + self.file_name.get_unicode()) + if self.extra != None: + srcs.append(str(self.extra)) + return "%s(%d):%s" % (code_str, self.code, ",".join(srcs)) diff --git a/amitools/fs/FSString.py b/amitools/fs/FSString.py index 0708ebbe..6d1afde2 100644 --- a/amitools/fs/FSString.py +++ b/amitools/fs/FSString.py @@ -3,44 +3,43 @@ class FSString: - """Simple string class that allows to manage strings encoded in Latin-1 used for the Amiga FS. + """Simple string class that allows to manage strings encoded in Latin-1 used for the Amiga FS. It stores the string internally as a python UTF-8 string but allows to convert to Amiga format. """ - - def __init__(self, txt="", encoding="Latin-1"): - """Init the string. Either with a string or with bytes. + + def __init__(self, txt="", encoding="Latin-1"): + """Init the string. Either with a string or with bytes. If the latter is given then the "encoding" flag determines the encoding. 
""" - if type(txt) is str: - self.txt = txt - elif type(txt) is bytes: - self.txt = txt.decode(encoding) - else: - raise ValueError("FSString must be str or bytes!") - - def __repr__(self): - return "FSString({})".format(self.txt) - - def __str__(self): - return self.txt - - def __eq__(self, other): - if isinstance(other, FSString): - return self.txt == other.txt - else: - return False - - def __ne__(self, other): - if isinstance(other, FSString): - return self.txt != other.txt - else: - return True - - def get_unicode(self): - return self.txt - - def get_ami_str(self): - # make sure to normalize utf-8 - nrm = unicodedata.normalize("NFKC", self.txt) - return nrm.encode("Latin-1") - \ No newline at end of file + if type(txt) is str: + self.txt = txt + elif type(txt) is bytes: + self.txt = txt.decode(encoding) + else: + raise ValueError("FSString must be str or bytes!") + + def __repr__(self): + return "FSString({})".format(self.txt) + + def __str__(self): + return self.txt + + def __eq__(self, other): + if isinstance(other, FSString): + return self.txt == other.txt + else: + return False + + def __ne__(self, other): + if isinstance(other, FSString): + return self.txt != other.txt + else: + return True + + def get_unicode(self): + return self.txt + + def get_ami_str(self): + # make sure to normalize utf-8 + nrm = unicodedata.normalize("NFKC", self.txt) + return nrm.encode("Latin-1") diff --git a/amitools/fs/FileName.py b/amitools/fs/FileName.py index 8d939d3b..e3b5f792 100644 --- a/amitools/fs/FileName.py +++ b/amitools/fs/FileName.py @@ -1,111 +1,113 @@ +from .FSString import FSString +class FileName: + root_path_aliases = ("", "/", ":") -from .FSString import FSString + def __init__(self, name, is_intl=False, is_longname=False): + # check that name is a FSString + if not isinstance(name, FSString): + raise ValueError("FileName's name must be a FSString") + self.name = name + self.is_intl = is_intl + self.is_longname = is_longname -class FileName: - 
root_path_aliases = ('', '/', ':') - - def __init__(self, name, is_intl=False, is_longname=False): - # check that name is a FSString - if not isinstance(name, FSString): - raise ValueError("FileName's name must be a FSString") - self.name = name - self.is_intl = is_intl - self.is_longname = is_longname - - def __str__(self): - return self.name - - def __repr__(self): - return self.name - - def is_root_path_alias(self): - return self.name.get_unicode() in self.root_path_aliases - - def has_dir_prefix(self): - return self.name.get_unicode().find("/") != -1 + def __str__(self): + return self.name + + def __repr__(self): + return self.name + + def is_root_path_alias(self): + return self.name.get_unicode() in self.root_path_aliases + + def has_dir_prefix(self): + return self.name.get_unicode().find("/") != -1 + + def split_path(self): + pc = self.name.get_unicode().split("/") + p = [] + for path in pc: + p.append( + FileName( + FSString(path), is_intl=self.is_intl, is_longname=self.is_longname + ) + ) + return p - def split_path(self): - pc = self.name.get_unicode().split("/") - p = [] - for path in pc: - p.append(FileName(FSString(path), is_intl=self.is_intl, is_longname=self.is_longname)) - return p - - def get_dir_and_base_name(self): - """Return portion after last slash '/' or the full name in unicode""" - s = self.name.get_unicode() - pos = s.rfind('/') - if pos != -1: - dir_name = s[:pos] - file_name = s[pos+1:] - if len(file_name) == 0: - return FSString(dir_name), None - else: - return FSString(dir_name), FSString(file_name) - else: - return None, self.name - - def get_upper_ami_str(self): - result = self.name.get_ami_str().upper() - if self.is_intl: - r = bytearray() - for i in range(len(result)): - o = result[i] - if o >= 224 and o <= 254 and o != 247: - r.append(o - (ord('a')-ord('A'))) + def get_dir_and_base_name(self): + """Return portion after last slash '/' or the full name in unicode""" + s = self.name.get_unicode() + pos = s.rfind("/") + if pos != -1: + 
dir_name = s[:pos] + file_name = s[pos + 1 :] + if len(file_name) == 0: + return FSString(dir_name), None + else: + return FSString(dir_name), FSString(file_name) else: - r.append(o) - return r - else: - return result - - def is_valid(self): - # check if path contains dir prefix components - if self.has_dir_prefix(): - e = self.split_path() - # empty path? - if len(e) == 0: - return False - for p in e: - if not p.is_valid(): + return None, self.name + + def get_upper_ami_str(self): + result = self.name.get_ami_str().upper() + if self.is_intl: + r = bytearray() + for i in range(len(result)): + o = result[i] + if o >= 224 and o <= 254 and o != 247: + r.append(o - (ord("a") - ord("A"))) + else: + r.append(o) + return r + else: + return result + + def is_valid(self): + # check if path contains dir prefix components + if self.has_dir_prefix(): + e = self.split_path() + # empty path? + if len(e) == 0: + return False + for p in e: + if not p.is_valid(): + return False + return True + else: + # single file name + s = self.name.get_ami_str() + # check for invalid chars + for c in s: + # o = ord(c) + # if o == ':' or o == '/': + # FIXME: FS + if c == ":" or c == "/": + return False + # check max size + if self.is_longname: + if len(s) > 110: + return False + elif len(s) > 30: return False return True - else: - # single file name - s = self.name.get_ami_str() - # check for invalid chars - for c in s: - # o = ord(c) - # if o == ':' or o == '/': - # FIXME: FS - if c == ':' or c == '/': - return False - # check max size - if self.is_longname: - if len(s) > 110: - return False - elif len(s) > 30: - return False - return True - - def hash(self, hash_size=72): - up = self.get_upper_ami_str() - h = len(up) - for c in up: - h = h * 13; - h += c - h &= 0x7ff - h = h % hash_size - return h - def get_name(self): - """Return file name string as a FSString.""" - return self.name + def hash(self, hash_size=72): + up = self.get_upper_ami_str() + h = len(up) + for c in up: + h = h * 13 + h 
+= c + h &= 0x7FF + h = h % hash_size + return h + + def get_name(self): + """Return file name string as a FSString.""" + return self.name + + def get_ami_str_name(self): + return self.name.get_ami_str() - def get_ami_str_name(self): - return self.name.get_ami_str() - - def get_unicode_name(self): - return self.name.get_unicode() + def get_unicode_name(self): + return self.name.get_unicode() diff --git a/amitools/fs/Imager.py b/amitools/fs/Imager.py index 92d14e8e..67dd02bc 100644 --- a/amitools/fs/Imager.py +++ b/amitools/fs/Imager.py @@ -1,6 +1,3 @@ - - - import os import os.path import sys @@ -18,229 +15,232 @@ from .FSString import FSString from .MetaInfoFSUAE import MetaInfoFSUAE + class Imager: - META_MODE_NONE = 0 - META_MODE_DB = 1 - META_MODE_FSUAE = 2 - - def __init__(self, path_encoding=None, meta_mode=META_MODE_DB): - self.meta_mode = meta_mode - self.meta_db = None - self.meta_fsuae = MetaInfoFSUAE() - self.total_bytes = 0 - self.path_encoding = path_encoding - # get path name encoding for host file system - if self.path_encoding == None: - self.path_encoding = sys.getfilesystemencoding() - - def get_total_bytes(self): - return self.total_bytes - - # ----- unpack ----- - - def unpack(self, volume, out_path): - # check for volume path - vol_name = volume.name.get_unicode() - if not os.path.exists(out_path): - vol_path = out_path - else: - path = os.path.abspath(out_path) - vol_path = os.path.join(path, vol_name) - if os.path.exists(vol_path): - raise IOError("Unpack directory already exists: "+vol_path) - # check for meta file - meta_path = vol_path + ".xdfmeta" - if os.path.exists(meta_path): - raise IOError("Unpack meta file already exists:"+meta_path) - # check for block dev file - blkdev_path = vol_path + ".blkdev" - if os.path.exists(blkdev_path): - raise IOError("Unpack blkdev file aready exists:"+blkdev_path) - # create volume path - if self.meta_mode != self.META_MODE_NONE: - self.meta_db = MetaDB() - self.unpack_root(volume, vol_path) - # save 
meta db - if self.meta_db: - self.meta_db.set_volume_name(volume.name.get_unicode()) - self.meta_db.set_root_meta_info(volume.get_meta_info()) - self.meta_db.set_dos_type(volume.boot.dos_type) - self.meta_db.save(meta_path) - # save boot code - if volume.boot.boot_code != None: - boot_code_path = vol_path + ".bootcode" - f = open(boot_code_path,"wb") - f.write(volume.boot.boot_code) - f.close() - # save blkdev: geo and block size - f = open(blkdev_path,"wb") - msg = "%s\n%s\n" % (volume.blkdev.get_chs_str(), - volume.blkdev.get_block_size_str()) - f.write(msg.encode('UTF-8')) - f.close() - - def unpack_root(self, volume, vol_path): - self.unpack_dir(volume.get_root_dir(), vol_path) - - def unpack_dir(self, dir, path): - if not os.path.exists(path): - os.mkdir(path) - for e in dir.get_entries(): - self.unpack_node(e, path) - - def unpack_node(self, node, path): - name = node.name.get_unicode_name() - file_path = os.path.join(path, name) - # store meta info - if self.meta_mode == self.META_MODE_DB: - # get path as FSString - node_path = node.get_node_path_name() - self.meta_db.set_meta_info(node_path.get_unicode(), node.meta_info) - # store meta in .uaem file - elif self.meta_mode == self.META_MODE_FSUAE: - uaem_path = file_path + self.meta_fsuae.get_suffix() - self.meta_fsuae.save_meta(uaem_path, node.meta_info) - # sub dir - if node.is_dir(): - sub_dir = file_path - os.mkdir(sub_dir) - for sub_node in node.get_entries(): - self.unpack_node(sub_node, sub_dir) - node.flush() - # file - elif node.is_file(): - data = node.get_file_data() - node.flush() - fh = open(file_path, "wb") - fh.write(data) - fh.close() - self.total_bytes += len(data) - - # ----- pack ----- - - def pack(self, in_path, image_file, force=True, options=None, dos_type=None): - self.pack_begin(in_path) - blkdev = self.pack_create_blkdev(in_path, image_file, force, options) - if blkdev == None: - raise IOError("Can't create block device for image: "+in_path) - volume = self.pack_create_volume(in_path, 
blkdev, dos_type) - if not volume.valid: - raise IOError("Can't create volume for image: "+in_path) - self.pack_root(in_path, volume) - self.pack_end(in_path, volume) - - def pack_begin(self, in_path): - # remove trailing slash - if in_path[-1] == '/': - in_path = in_path[:-1] - meta_path = in_path + ".xdfmeta" - if os.path.exists(meta_path): - self.meta_db = MetaDB() - self.meta_db.load(meta_path) - - def pack_end(self, in_path, volume): - boot_code_path = in_path + ".bootcode" - if os.path.exists(boot_code_path): - # read boot code - f = open(boot_code_path, "rb") - data = f.read() - f.close() - # write boot code - bb = volume.boot - ok = bb.set_boot_code(data) - if ok: - bb.write() - else: - raise IOError("Invalid Boot Code") - - def pack_create_blkdev(self, in_path, image_file, force=True, options=None): - factory = BlkDevFactory() - blkdev = None - if not force: - # try to open an existing image or return None - blkdev = factory.open(image_file, none_if_missing=True) - - if not blkdev: - # try to read options from blkdev file - if options == None or len(options) == 0: - blkdev_path = in_path + ".blkdev" + META_MODE_NONE = 0 + META_MODE_DB = 1 + META_MODE_FSUAE = 2 + + def __init__(self, path_encoding=None, meta_mode=META_MODE_DB): + self.meta_mode = meta_mode + self.meta_db = None + self.meta_fsuae = MetaInfoFSUAE() + self.total_bytes = 0 + self.path_encoding = path_encoding + # get path name encoding for host file system + if self.path_encoding == None: + self.path_encoding = sys.getfilesystemencoding() + + def get_total_bytes(self): + return self.total_bytes + + # ----- unpack ----- + + def unpack(self, volume, out_path): + # check for volume path + vol_name = volume.name.get_unicode() + if not os.path.exists(out_path): + vol_path = out_path + else: + path = os.path.abspath(out_path) + vol_path = os.path.join(path, vol_name) + if os.path.exists(vol_path): + raise IOError("Unpack directory already exists: " + vol_path) + # check for meta file + meta_path = 
vol_path + ".xdfmeta" + if os.path.exists(meta_path): + raise IOError("Unpack meta file already exists:" + meta_path) + # check for block dev file + blkdev_path = vol_path + ".blkdev" if os.path.exists(blkdev_path): - f = open(blkdev_path, "rb") - options = {} - for line in f: - KeyValue.parse_key_value_string(line, options) - f.close() - # create a new blkdev - blkdev = factory.create(image_file, force=force, options=options) - return blkdev - - def pack_create_volume(self, in_path, blkdev, dos_type=None): - if self.meta_db != None: - name = self.meta_db.get_volume_name() - meta_info = self.meta_db.get_root_meta_info() - if dos_type is None: - dos_type = self.meta_db.get_dos_type() - else: - # try to derive volume name from image name - if in_path == None or in_path == "": - raise IOError("Invalid pack input path!") - # remove trailing slash - if in_path[-1] == '/': - in_path = in_path[:-1] - name = os.path.basename(in_path) - meta_info = None - if dos_type is None: - dos_type = DosType.DOS0 - volume = ADFSVolume(blkdev) - volume.create(FSString(name), meta_info, dos_type=dos_type) - return volume - - def pack_root(self, in_path, volume): - self.pack_dir(in_path, volume.get_root_dir()) - - def pack_dir(self, in_path, parent_node): - path = os.path.abspath(in_path) - if not os.path.exists(path): - raise IOError("Pack directory does not exist: "+path) - for name in os.listdir(in_path): - sub_path = os.path.join(in_path, name) - self.pack_entry(sub_path, parent_node) - - def pack_entry(self, in_path, parent_node): - # skip .uaem files - if self.meta_fsuae.is_meta_file(in_path): - return - # convert amiga name - ami_name = FSString(os.path.basename(in_path)).get_ami_str() - # check for meta file - meta_path = in_path + self.meta_fsuae.get_suffix() - if os.path.isfile(meta_path): - meta_info = self.meta_fsuae.load_meta(meta_path) - # retrieve meta info for path from DB - elif self.meta_db != None: - ami_path = parent_node.get_node_path_name().get_unicode() - if 
ami_path != "": - ami_path += "/" + ami_name - else: - ami_path = ami_name - meta_info = self.meta_db.get_meta_info(ami_path) - else: - meta_info = None - - # pack directory - if os.path.isdir(in_path): - node = parent_node.create_dir(FSString(ami_name), meta_info, False) - for name in os.listdir(in_path): - sub_path = os.path.join(in_path, name) - self.pack_entry(sub_path, node) - node.flush() - # pack file - elif os.path.isfile(in_path): - # read file - fh = open(in_path, "rb") - data = fh.read() - fh.close() - node = parent_node.create_file(FSString(ami_name), data, meta_info, False) - node.flush() - self.total_bytes += len(data) + raise IOError("Unpack blkdev file aready exists:" + blkdev_path) + # create volume path + if self.meta_mode != self.META_MODE_NONE: + self.meta_db = MetaDB() + self.unpack_root(volume, vol_path) + # save meta db + if self.meta_db: + self.meta_db.set_volume_name(volume.name.get_unicode()) + self.meta_db.set_root_meta_info(volume.get_meta_info()) + self.meta_db.set_dos_type(volume.boot.dos_type) + self.meta_db.save(meta_path) + # save boot code + if volume.boot.boot_code != None: + boot_code_path = vol_path + ".bootcode" + f = open(boot_code_path, "wb") + f.write(volume.boot.boot_code) + f.close() + # save blkdev: geo and block size + f = open(blkdev_path, "wb") + msg = "%s\n%s\n" % ( + volume.blkdev.get_chs_str(), + volume.blkdev.get_block_size_str(), + ) + f.write(msg.encode("UTF-8")) + f.close() + + def unpack_root(self, volume, vol_path): + self.unpack_dir(volume.get_root_dir(), vol_path) + + def unpack_dir(self, dir, path): + if not os.path.exists(path): + os.mkdir(path) + for e in dir.get_entries(): + self.unpack_node(e, path) + + def unpack_node(self, node, path): + name = node.name.get_unicode_name() + file_path = os.path.join(path, name) + # store meta info + if self.meta_mode == self.META_MODE_DB: + # get path as FSString + node_path = node.get_node_path_name() + self.meta_db.set_meta_info(node_path.get_unicode(), 
node.meta_info) + # store meta in .uaem file + elif self.meta_mode == self.META_MODE_FSUAE: + uaem_path = file_path + self.meta_fsuae.get_suffix() + self.meta_fsuae.save_meta(uaem_path, node.meta_info) + # sub dir + if node.is_dir(): + sub_dir = file_path + os.mkdir(sub_dir) + for sub_node in node.get_entries(): + self.unpack_node(sub_node, sub_dir) + node.flush() + # file + elif node.is_file(): + data = node.get_file_data() + node.flush() + fh = open(file_path, "wb") + fh.write(data) + fh.close() + self.total_bytes += len(data) + + # ----- pack ----- + + def pack(self, in_path, image_file, force=True, options=None, dos_type=None): + self.pack_begin(in_path) + blkdev = self.pack_create_blkdev(in_path, image_file, force, options) + if blkdev == None: + raise IOError("Can't create block device for image: " + in_path) + volume = self.pack_create_volume(in_path, blkdev, dos_type) + if not volume.valid: + raise IOError("Can't create volume for image: " + in_path) + self.pack_root(in_path, volume) + self.pack_end(in_path, volume) + + def pack_begin(self, in_path): + # remove trailing slash + if in_path[-1] == "/": + in_path = in_path[:-1] + meta_path = in_path + ".xdfmeta" + if os.path.exists(meta_path): + self.meta_db = MetaDB() + self.meta_db.load(meta_path) + + def pack_end(self, in_path, volume): + boot_code_path = in_path + ".bootcode" + if os.path.exists(boot_code_path): + # read boot code + f = open(boot_code_path, "rb") + data = f.read() + f.close() + # write boot code + bb = volume.boot + ok = bb.set_boot_code(data) + if ok: + bb.write() + else: + raise IOError("Invalid Boot Code") + + def pack_create_blkdev(self, in_path, image_file, force=True, options=None): + factory = BlkDevFactory() + blkdev = None + if not force: + # try to open an existing image or return None + blkdev = factory.open(image_file, none_if_missing=True) + + if not blkdev: + # try to read options from blkdev file + if options == None or len(options) == 0: + blkdev_path = in_path + ".blkdev" 
+ if os.path.exists(blkdev_path): + f = open(blkdev_path, "rb") + options = {} + for line in f: + KeyValue.parse_key_value_string(line, options) + f.close() + # create a new blkdev + blkdev = factory.create(image_file, force=force, options=options) + return blkdev + + def pack_create_volume(self, in_path, blkdev, dos_type=None): + if self.meta_db != None: + name = self.meta_db.get_volume_name() + meta_info = self.meta_db.get_root_meta_info() + if dos_type is None: + dos_type = self.meta_db.get_dos_type() + else: + # try to derive volume name from image name + if in_path == None or in_path == "": + raise IOError("Invalid pack input path!") + # remove trailing slash + if in_path[-1] == "/": + in_path = in_path[:-1] + name = os.path.basename(in_path) + meta_info = None + if dos_type is None: + dos_type = DosType.DOS0 + volume = ADFSVolume(blkdev) + volume.create(FSString(name), meta_info, dos_type=dos_type) + return volume + + def pack_root(self, in_path, volume): + self.pack_dir(in_path, volume.get_root_dir()) + + def pack_dir(self, in_path, parent_node): + path = os.path.abspath(in_path) + if not os.path.exists(path): + raise IOError("Pack directory does not exist: " + path) + for name in os.listdir(in_path): + sub_path = os.path.join(in_path, name) + self.pack_entry(sub_path, parent_node) + + def pack_entry(self, in_path, parent_node): + # skip .uaem files + if self.meta_fsuae.is_meta_file(in_path): + return + # convert amiga name + ami_name = FSString(os.path.basename(in_path)).get_ami_str() + # check for meta file + meta_path = in_path + self.meta_fsuae.get_suffix() + if os.path.isfile(meta_path): + meta_info = self.meta_fsuae.load_meta(meta_path) + # retrieve meta info for path from DB + elif self.meta_db != None: + ami_path = parent_node.get_node_path_name().get_unicode() + if ami_path != "": + ami_path += "/" + ami_name + else: + ami_path = ami_name + meta_info = self.meta_db.get_meta_info(ami_path) + else: + meta_info = None + + # pack directory + if 
os.path.isdir(in_path): + node = parent_node.create_dir(FSString(ami_name), meta_info, False) + for name in os.listdir(in_path): + sub_path = os.path.join(in_path, name) + self.pack_entry(sub_path, node) + node.flush() + # pack file + elif os.path.isfile(in_path): + # read file + fh = open(in_path, "rb") + data = fh.read() + fh.close() + node = parent_node.create_file(FSString(ami_name), data, meta_info, False) + node.flush() + self.total_bytes += len(data) diff --git a/amitools/fs/MetaDB.py b/amitools/fs/MetaDB.py index ef2e74b8..ae03c313 100644 --- a/amitools/fs/MetaDB.py +++ b/amitools/fs/MetaDB.py @@ -1,6 +1,3 @@ - - - from .MetaInfo import MetaInfo from .RootMetaInfo import RootMetaInfo from .ProtectFlags import ProtectFlags @@ -9,141 +6,148 @@ from . import DosType from .FSString import FSString + class MetaDB: - def __init__(self): - self.metas = {} - self.vol_name = None - self.vol_meta = None - self.dos_type = DosType.DOS0 - - def set_root_meta_info(self, meta): - self.vol_meta = meta - - def get_root_meta_info(self): - return self.vol_meta - - def set_volume_name(self, name): - if type(name) != str: - raise ValueError("set_volume_name must be unicode") - self.vol_name = name - - def get_volume_name(self): - return self.vol_name - - def set_dos_type(self, dos_type): - self.dos_type = dos_type - - def get_dos_type(self): - return self.dos_type - - def set_meta_info(self, path, meta_info): - if type(path) != str: - raise ValueError("set_meta_info: path must be unicode") - self.metas[path] = meta_info - - def get_meta_info(self, path): - if path in self.metas: - return self.metas[path] - else: - return None - - def dump(self): - print(self.vol_name, self.vol_meta, self.dos_type) - for m in self.metas: - print(m) - - # ----- load ----- - - def load(self, file_path): - self.metas = {} - f = open(file_path, "r") - first = True - for line in f: - if first: - self.load_header(line) - first = False - else: - self.load_entry(line) - f.close() - - def 
load_header(self, line): - pos = line.find(':') - if pos == -1: - raise IOError("Invalid xdfmeta header! (no colon in line)") - # first extract volume name - vol_name = line[:pos] - self.vol_name = vol_name.decode("UTF-8") - line = line[pos+1:] - # now get parameters - comp = line.split(',') - if len(comp) != 4: - raise IOError("Invalid xdfmeta header! (wrong number of parameters found)") - # first dos type - dos_type_str = comp[0] - if len(dos_type_str) != 4: - raise IOError("Invalid xdfmeta dostype string") - num = ord(dos_type_str[3]) - ord('0') - if num < 0 or num > 7: - raise IOError("Invalid xdfmeta dostype number") - self.dos_type = DosType.DOS0 + num - # then time stamps - create_ts = TimeStamp() - ok1 = create_ts.parse(comp[1]) - disk_ts = TimeStamp() - ok2 = disk_ts.parse(comp[2]) - mod_ts = TimeStamp() - ok3 = mod_ts.parse(comp[3]) - if not ok1 or not ok2 or not ok3: - raise IOError("Invalid xdfmeta header! (invalid timestamp found)") - self.vol_meta = RootMetaInfo(create_ts, disk_ts, mod_ts) - - def load_entry(self, line): - line = line.strip() - # path - pos = line.find(':') - if pos == -1: - raise IOError("Invalid xdfmeta file! (no colon in line)") - path = line[:pos].decode("UTF-8") - # prot - line = line[pos+1:] - pos = line.find(',') - if pos == -1: - raise IOError("Invalid xdfmeta file! (no first comma)") - prot_str = line[:pos] - prot = ProtectFlags() - prot.parse(prot_str) - # time - line = line[pos+1:] - pos = line.find(',') - if pos == -1: - raise IOError("Invalid xdfmeta file! 
(no second comma)") - time_str = line[:pos] - time = TimeStamp() - time.parse(time_str) - # comment - comment = FSString(line[pos+1:].decode("UTF-8")) - # meta info - mi = MetaInfo(protect_flags=prot, mod_ts=time, comment=comment) - self.set_meta_info(path, mi) - - # ----- save ----- - - def save(self, file_path): - f = open(file_path, "w") - # header - mi = self.vol_meta - num = self.dos_type - DosType.DOS0 + ord('0') - dos_type_str = "DOS%c" % num - vol_name = self.vol_name.encode("UTF-8") - line = "%s:%s,%s,%s,%s\n" % (vol_name, dos_type_str, mi.get_create_ts(), mi.get_disk_ts(), mi.get_mod_ts()) - f.write(line) - # entries - for path in sorted(self.metas): - meta_info = self.metas[path] - protect = meta_info.get_protect_short_str() - mod_time = meta_info.get_mod_time_str() - comment = meta_info.get_comment_unicode_str().encode("UTF-8") - path_name = path.encode("UTF-8") - line = "%s:%s,%s,%s\n" % (path_name, protect, mod_time, comment) - f.write(line) - f.close() + def __init__(self): + self.metas = {} + self.vol_name = None + self.vol_meta = None + self.dos_type = DosType.DOS0 + + def set_root_meta_info(self, meta): + self.vol_meta = meta + + def get_root_meta_info(self): + return self.vol_meta + + def set_volume_name(self, name): + if type(name) != str: + raise ValueError("set_volume_name must be unicode") + self.vol_name = name + + def get_volume_name(self): + return self.vol_name + + def set_dos_type(self, dos_type): + self.dos_type = dos_type + + def get_dos_type(self): + return self.dos_type + + def set_meta_info(self, path, meta_info): + if type(path) != str: + raise ValueError("set_meta_info: path must be unicode") + self.metas[path] = meta_info + + def get_meta_info(self, path): + if path in self.metas: + return self.metas[path] + else: + return None + + def dump(self): + print(self.vol_name, self.vol_meta, self.dos_type) + for m in self.metas: + print(m) + + # ----- load ----- + + def load(self, file_path): + self.metas = {} + f = open(file_path, "r") 
+ first = True + for line in f: + if first: + self.load_header(line) + first = False + else: + self.load_entry(line) + f.close() + + def load_header(self, line): + pos = line.find(":") + if pos == -1: + raise IOError("Invalid xdfmeta header! (no colon in line)") + # first extract volume name + vol_name = line[:pos] + self.vol_name = vol_name.decode("UTF-8") + line = line[pos + 1 :] + # now get parameters + comp = line.split(",") + if len(comp) != 4: + raise IOError("Invalid xdfmeta header! (wrong number of parameters found)") + # first dos type + dos_type_str = comp[0] + if len(dos_type_str) != 4: + raise IOError("Invalid xdfmeta dostype string") + num = ord(dos_type_str[3]) - ord("0") + if num < 0 or num > 7: + raise IOError("Invalid xdfmeta dostype number") + self.dos_type = DosType.DOS0 + num + # then time stamps + create_ts = TimeStamp() + ok1 = create_ts.parse(comp[1]) + disk_ts = TimeStamp() + ok2 = disk_ts.parse(comp[2]) + mod_ts = TimeStamp() + ok3 = mod_ts.parse(comp[3]) + if not ok1 or not ok2 or not ok3: + raise IOError("Invalid xdfmeta header! (invalid timestamp found)") + self.vol_meta = RootMetaInfo(create_ts, disk_ts, mod_ts) + + def load_entry(self, line): + line = line.strip() + # path + pos = line.find(":") + if pos == -1: + raise IOError("Invalid xdfmeta file! (no colon in line)") + path = line[:pos].decode("UTF-8") + # prot + line = line[pos + 1 :] + pos = line.find(",") + if pos == -1: + raise IOError("Invalid xdfmeta file! (no first comma)") + prot_str = line[:pos] + prot = ProtectFlags() + prot.parse(prot_str) + # time + line = line[pos + 1 :] + pos = line.find(",") + if pos == -1: + raise IOError("Invalid xdfmeta file! 
(no second comma)") + time_str = line[:pos] + time = TimeStamp() + time.parse(time_str) + # comment + comment = FSString(line[pos + 1 :].decode("UTF-8")) + # meta info + mi = MetaInfo(protect_flags=prot, mod_ts=time, comment=comment) + self.set_meta_info(path, mi) + + # ----- save ----- + + def save(self, file_path): + f = open(file_path, "w") + # header + mi = self.vol_meta + num = self.dos_type - DosType.DOS0 + ord("0") + dos_type_str = "DOS%c" % num + vol_name = self.vol_name.encode("UTF-8") + line = "%s:%s,%s,%s,%s\n" % ( + vol_name, + dos_type_str, + mi.get_create_ts(), + mi.get_disk_ts(), + mi.get_mod_ts(), + ) + f.write(line) + # entries + for path in sorted(self.metas): + meta_info = self.metas[path] + protect = meta_info.get_protect_short_str() + mod_time = meta_info.get_mod_time_str() + comment = meta_info.get_comment_unicode_str().encode("UTF-8") + path_name = path.encode("UTF-8") + line = "%s:%s,%s,%s\n" % (path_name, protect, mod_time, comment) + f.write(line) + f.close() diff --git a/amitools/fs/MetaInfo.py b/amitools/fs/MetaInfo.py index c3d65ba1..22581b0b 100644 --- a/amitools/fs/MetaInfo.py +++ b/amitools/fs/MetaInfo.py @@ -1,112 +1,110 @@ - - - from .ProtectFlags import * from .TimeStamp import * from .FSString import FSString + class MetaInfo: - def __init__(self, protect=None, mod_ts=None, comment=None, protect_flags=None): - if protect_flags != None: - self.set_protect_flags(protect_flags) - else: - self.set_protect(protect) - self.set_mod_ts(mod_ts) - self.set_comment(comment) - - def get_str_line(self): - """Return a unicode string with protect flags, mod time and (optional) comment""" - res = [] - res.append(self.get_protect_str()) - res.append(self.get_mod_time_str()) - comment = self.get_comment() - if comment == None: - res.append('') - else: - res.append(self.get_comment().get_unicode()) - return ' '.join(res) - - def get_mod_time_str(self): - if self.mod_ts != None: - return str(self.mod_ts) - else: - return ts_empty_string - - def 
get_protect_str(self): - if self.protect_flags != None: - return str(self.protect_flags) - else: - return ProtectFlags.empty_string - - def get_protect_short_str(self): - if self.protect_flags != None: - return self.protect_flags.short_str() - else: - return "" - - def set_protect(self, protect): - self.protect = protect - if self.protect != None: - self.protect_flags = ProtectFlags(protect) - else: - self.protect_flags = None - - def set_protect_flags(self, pf): - self.protect_flags = pf - self.protect = pf.mask - - def set_default_protect(self): - self.protect = 0 - self.protect_flags = ProtectFlags(self.protect) - - def set_current_as_mod_time(self): - mod_time = time.mktime(time.localtime()) - self.set_mod_time(mod_time) - - def set_mod_time(self, mod_time): - self.mod_time = mod_time - if self.mod_time != None: - self.mod_ts = TimeStamp() - self.mod_ts.from_secs(mod_time) - else: - self.mod_ts = None - - def set_mod_ts(self, mod_ts): - self.mod_ts = mod_ts - if self.mod_ts != None: - self.mod_time = self.mod_ts.get_secsf() - else: - self.mod_time = None - - def set_comment(self, comment): - """Set comment as a FSString""" - if comment != None and not isinstance(comment, FSString): - raise ValueError("Comment must be a FSString") - self.comment = comment - - def get_protect(self): - return self.protect - - def get_protect_flags(self): - return self.protect_flags - - def get_mod_time(self): - return self.mod_time - - def get_mod_ts(self): - return self.mod_ts - - def get_comment(self): - return self.comment - - def get_comment_ami_str(self): - if self.comment != None: - return self.comment.get_ami_str() - else: - return "" - - def get_comment_unicode_str(self): - if self.comment != None: - return self.comment.get_unicode() - else: - return "" + def __init__(self, protect=None, mod_ts=None, comment=None, protect_flags=None): + if protect_flags != None: + self.set_protect_flags(protect_flags) + else: + self.set_protect(protect) + self.set_mod_ts(mod_ts) + 
self.set_comment(comment) + + def get_str_line(self): + """Return a unicode string with protect flags, mod time and (optional) comment""" + res = [] + res.append(self.get_protect_str()) + res.append(self.get_mod_time_str()) + comment = self.get_comment() + if comment == None: + res.append("") + else: + res.append(self.get_comment().get_unicode()) + return " ".join(res) + + def get_mod_time_str(self): + if self.mod_ts != None: + return str(self.mod_ts) + else: + return ts_empty_string + + def get_protect_str(self): + if self.protect_flags != None: + return str(self.protect_flags) + else: + return ProtectFlags.empty_string + + def get_protect_short_str(self): + if self.protect_flags != None: + return self.protect_flags.short_str() + else: + return "" + + def set_protect(self, protect): + self.protect = protect + if self.protect != None: + self.protect_flags = ProtectFlags(protect) + else: + self.protect_flags = None + + def set_protect_flags(self, pf): + self.protect_flags = pf + self.protect = pf.mask + + def set_default_protect(self): + self.protect = 0 + self.protect_flags = ProtectFlags(self.protect) + + def set_current_as_mod_time(self): + mod_time = time.mktime(time.localtime()) + self.set_mod_time(mod_time) + + def set_mod_time(self, mod_time): + self.mod_time = mod_time + if self.mod_time != None: + self.mod_ts = TimeStamp() + self.mod_ts.from_secs(mod_time) + else: + self.mod_ts = None + + def set_mod_ts(self, mod_ts): + self.mod_ts = mod_ts + if self.mod_ts != None: + self.mod_time = self.mod_ts.get_secsf() + else: + self.mod_time = None + + def set_comment(self, comment): + """Set comment as a FSString""" + if comment != None and not isinstance(comment, FSString): + raise ValueError("Comment must be a FSString") + self.comment = comment + + def get_protect(self): + return self.protect + + def get_protect_flags(self): + return self.protect_flags + + def get_mod_time(self): + return self.mod_time + + def get_mod_ts(self): + return self.mod_ts + + def 
get_comment(self): + return self.comment + + def get_comment_ami_str(self): + if self.comment != None: + return self.comment.get_ami_str() + else: + return "" + + def get_comment_unicode_str(self): + if self.comment != None: + return self.comment.get_unicode() + else: + return "" diff --git a/amitools/fs/MetaInfoFSUAE.py b/amitools/fs/MetaInfoFSUAE.py index 6d54b817..60498080 100644 --- a/amitools/fs/MetaInfoFSUAE.py +++ b/amitools/fs/MetaInfoFSUAE.py @@ -11,20 +11,19 @@ class MetaInfoFSUAE: - @staticmethod def is_meta_file(path): return path.lower().endswith(".uaem") - + @staticmethod def get_suffix(): return ".uaem" def load_meta(self, path): with open(path, "rb") as fh: - data = fh.read().decode('utf-8') + data = fh.read().decode("utf-8") return self.parse_data(data) - + def parse_data(self, data): if data.endswith("\n"): data = data[:-1] @@ -68,4 +67,4 @@ def generate_data(self, meta_info): def save_meta(self, path, meta_info): with open(path, "wb") as fh: txt = self.generate_data(meta_info) - fh.write(txt.encode('utf-8')) + fh.write(txt.encode("utf-8")) diff --git a/amitools/fs/ProtectFlags.py b/amitools/fs/ProtectFlags.py index 666adff6..6974ac80 100644 --- a/amitools/fs/ProtectFlags.py +++ b/amitools/fs/ProtectFlags.py @@ -1,143 +1,145 @@ +from .FSError import * +class ProtectFlags: + FIBF_DELETE = 1 + FIBF_EXECUTE = 2 + FIBF_WRITE = 4 + FIBF_READ = 8 + FIBF_ARCHIVE = 16 + FIBF_PURE = 32 + FIBF_SCRIPT = 64 -from .FSError import * + flag_txt = "HSPArwed" + flag_num = len(flag_txt) + flag_none = 0xF # -------- + empty_string = "-" * flag_num -class ProtectFlags: - FIBF_DELETE = 1 - FIBF_EXECUTE = 2 - FIBF_WRITE = 4 - FIBF_READ = 8 - FIBF_ARCHIVE = 16 - FIBF_PURE = 32 - FIBF_SCRIPT = 64 - - flag_txt = "HSPArwed" - flag_num = len(flag_txt) - flag_none = 0xf # -------- - empty_string = "-" * flag_num - - def __init__(self, mask=0): - self.mask = mask - - def get_mask(self): - return self.mask - - def __str__(self): - txt = "" - pos = self.flag_num - 1 - m = 1 << 
pos - for i in range(self.flag_num): - bit = self.mask & m == m - show = '-' - flg = self.flag_txt[i] - flg_low = flg.lower() - if bit: - if flg_low != flg: - show = flg_low - else: - if flg_low == flg: - show = flg_low - txt += show - m >>= 1 - pos -= 1 - return txt - - def bin_str(self): - res = "" - m = 1 << (self.flag_num - 1) - for i in range(self.flag_num): - if m & self.mask == m: - res += "1" - else: - res += "0" - m >>= 1 - return res - - def short_str(self): - return str(self).replace("-","") - - def parse_full(self, s): - """parse a string with all flags""" - n = len(self.flag_txt) - if len(s) != n: - raise ValueError("full string size mismatch!") - mask = 0 - for i in range(n): - val = s[i] - ref = self.flag_txt[i] - ref_lo = ref.lower() - if val not in (ref, ref_lo, '-'): - raise ValueError("invalid protect char: " + val) - is_lo = ref == ref_lo - is_blank = val == '-' - if is_lo: - do_set = is_blank - else: - do_set = not is_blank - if do_set: - bit_pos = n - i - 1 - bit_mask = 1 << bit_pos - mask |= bit_mask - self.mask = mask - - def parse(self, s): - if len(s) == 0: - return - # allow to add with '+' or sub with '-' - n = self.flag_txt - mode = '+' - self.mask = self.flag_none - for a in s.lower(): - if a in '+-': - mode = a - else: - mask = None - is_low = None + def __init__(self, mask=0): + self.mask = mask + + def get_mask(self): + return self.mask + + def __str__(self): + txt = "" + pos = self.flag_num - 1 + m = 1 << pos + for i in range(self.flag_num): + bit = self.mask & m == m + show = "-" + flg = self.flag_txt[i] + flg_low = flg.lower() + if bit: + if flg_low != flg: + show = flg_low + else: + if flg_low == flg: + show = flg_low + txt += show + m >>= 1 + pos -= 1 + return txt + + def bin_str(self): + res = "" + m = 1 << (self.flag_num - 1) for i in range(self.flag_num): - flg = self.flag_txt[i] - flg_low = flg.lower() - if flg_low == a: - mask = 1<<(self.flag_num - 1 - i) - is_low = flg_low == flg - break - if mask == None: - raise 
FSError(INVALID_PROTECT_FORMAT,extra="char: "+a) - # apply mask - if mode == '+': - if is_low: - self.mask &= ~mask - else: - self.mask |= mask - else: - if is_low: - self.mask |= mask - else: - self.mask &= ~mask - - def is_set(self, mask): - return self.mask & mask == 0 # LO active - def set(self, mask): - self.mask &= ~mask - def clr(self, mask): - self.mask |= mask - - def is_d(self): - return self.is_set(self.FIBF_DELETE) - def is_e(self): - return self.is_set(self.FIBF_EXECUTE) - def is_w(self): - return self.is_set(self.FIBF_WRITE) - def is_r(self): - return self.is_set(self.FIBF_READ) - -if __name__ == '__main__': - inp = ["h","s","p","a","r","w","e","d"] - for i in inp: - p = ProtectFlags() - p.parse(i) - s = str(p) - if not i in s: - print(s) - - \ No newline at end of file + if m & self.mask == m: + res += "1" + else: + res += "0" + m >>= 1 + return res + + def short_str(self): + return str(self).replace("-", "") + + def parse_full(self, s): + """parse a string with all flags""" + n = len(self.flag_txt) + if len(s) != n: + raise ValueError("full string size mismatch!") + mask = 0 + for i in range(n): + val = s[i] + ref = self.flag_txt[i] + ref_lo = ref.lower() + if val not in (ref, ref_lo, "-"): + raise ValueError("invalid protect char: " + val) + is_lo = ref == ref_lo + is_blank = val == "-" + if is_lo: + do_set = is_blank + else: + do_set = not is_blank + if do_set: + bit_pos = n - i - 1 + bit_mask = 1 << bit_pos + mask |= bit_mask + self.mask = mask + + def parse(self, s): + if len(s) == 0: + return + # allow to add with '+' or sub with '-' + n = self.flag_txt + mode = "+" + self.mask = self.flag_none + for a in s.lower(): + if a in "+-": + mode = a + else: + mask = None + is_low = None + for i in range(self.flag_num): + flg = self.flag_txt[i] + flg_low = flg.lower() + if flg_low == a: + mask = 1 << (self.flag_num - 1 - i) + is_low = flg_low == flg + break + if mask == None: + raise FSError(INVALID_PROTECT_FORMAT, extra="char: " + a) + # apply mask + 
if mode == "+": + if is_low: + self.mask &= ~mask + else: + self.mask |= mask + else: + if is_low: + self.mask |= mask + else: + self.mask &= ~mask + + def is_set(self, mask): + return self.mask & mask == 0 # LO active + + def set(self, mask): + self.mask &= ~mask + + def clr(self, mask): + self.mask |= mask + + def is_d(self): + return self.is_set(self.FIBF_DELETE) + + def is_e(self): + return self.is_set(self.FIBF_EXECUTE) + + def is_w(self): + return self.is_set(self.FIBF_WRITE) + + def is_r(self): + return self.is_set(self.FIBF_READ) + + +if __name__ == "__main__": + inp = ["h", "s", "p", "a", "r", "w", "e", "d"] + for i in inp: + p = ProtectFlags() + p.parse(i) + s = str(p) + if not i in s: + print(s) diff --git a/amitools/fs/Repacker.py b/amitools/fs/Repacker.py index 592341ca..ef3d8cde 100644 --- a/amitools/fs/Repacker.py +++ b/amitools/fs/Repacker.py @@ -1,84 +1,88 @@ - - - from .ADFSVolume import ADFSVolume from amitools.fs.blkdev.BlkDevFactory import BlkDevFactory + class Repacker: - def __init__(self, in_image_file, in_options=None): - self.in_image_file = in_image_file - self.in_options = in_options - self.in_blkdev = None - self.out_blkdev = None - self.in_volume = None - self.out_volume = None - - def create_in_blkdev(self): - f = BlkDevFactory() - self.in_blkdev = f.open(self.in_image_file, read_only=True, options=self.in_options) - return self.in_blkdev - - def create_in_volume(self): - if self.in_blkdev == None: - return None - self.in_volume = ADFSVolume(self.in_blkdev) - self.in_volume.open() - return self.in_volume - - def create_in(self): - if self.create_in_blkdev() == None: - return False - if self.create_in_volume() == None: - return False - return True - - def create_out_blkdev(self, image_file, force=True, options=None): - if self.in_blkdev == None: - return None - # clone geo from input - if options == None: - options = self.in_blkdev.get_options() - f = BlkDevFactory() - self.out_blkdev = f.create(image_file, force=force, 
options=options) - return self.out_blkdev - - def create_out_volume(self, blkdev=None): - if blkdev != None: - self.out_blkdev = blkdev - if self.out_blkdev == None: - return None - if self.in_volume == None: - return None - # clone input volume - iv = self.in_volume - name = iv.get_volume_name() - dos_type = iv.get_dos_type() - meta_info = iv.get_meta_info() - boot_code = iv.get_boot_code() - self.out_volume = ADFSVolume(self.out_blkdev) - self.out_volume.create(name, meta_info=meta_info, dos_type=dos_type, boot_code=boot_code) - return self.out_volume - - def repack(self): - self.repack_node_dir(self.in_volume.get_root_dir(), self.out_volume.get_root_dir()) - - def repack_node_dir(self, in_root, out_root): - entries = in_root.get_entries() - for e in entries: - self.repack_node(e, out_root) - - def repack_node(self, in_node, out_dir): - name = in_node.get_file_name().get_name() - meta_info = in_node.get_meta_info() - # sub dir - if in_node.is_dir(): - sub_dir = out_dir.create_dir(name, meta_info, False) - for child in in_node.get_entries(): - self.repack_node(child, sub_dir) - sub_dir.flush() - # file - elif in_node.is_file(): - data = in_node.get_file_data() - out_file = out_dir.create_file(name, data, meta_info, False) - out_file.flush() - in_node.flush() + def __init__(self, in_image_file, in_options=None): + self.in_image_file = in_image_file + self.in_options = in_options + self.in_blkdev = None + self.out_blkdev = None + self.in_volume = None + self.out_volume = None + + def create_in_blkdev(self): + f = BlkDevFactory() + self.in_blkdev = f.open( + self.in_image_file, read_only=True, options=self.in_options + ) + return self.in_blkdev + + def create_in_volume(self): + if self.in_blkdev == None: + return None + self.in_volume = ADFSVolume(self.in_blkdev) + self.in_volume.open() + return self.in_volume + + def create_in(self): + if self.create_in_blkdev() == None: + return False + if self.create_in_volume() == None: + return False + return True + + def 
create_out_blkdev(self, image_file, force=True, options=None): + if self.in_blkdev == None: + return None + # clone geo from input + if options == None: + options = self.in_blkdev.get_options() + f = BlkDevFactory() + self.out_blkdev = f.create(image_file, force=force, options=options) + return self.out_blkdev + + def create_out_volume(self, blkdev=None): + if blkdev != None: + self.out_blkdev = blkdev + if self.out_blkdev == None: + return None + if self.in_volume == None: + return None + # clone input volume + iv = self.in_volume + name = iv.get_volume_name() + dos_type = iv.get_dos_type() + meta_info = iv.get_meta_info() + boot_code = iv.get_boot_code() + self.out_volume = ADFSVolume(self.out_blkdev) + self.out_volume.create( + name, meta_info=meta_info, dos_type=dos_type, boot_code=boot_code + ) + return self.out_volume + + def repack(self): + self.repack_node_dir( + self.in_volume.get_root_dir(), self.out_volume.get_root_dir() + ) + + def repack_node_dir(self, in_root, out_root): + entries = in_root.get_entries() + for e in entries: + self.repack_node(e, out_root) + + def repack_node(self, in_node, out_dir): + name = in_node.get_file_name().get_name() + meta_info = in_node.get_meta_info() + # sub dir + if in_node.is_dir(): + sub_dir = out_dir.create_dir(name, meta_info, False) + for child in in_node.get_entries(): + self.repack_node(child, sub_dir) + sub_dir.flush() + # file + elif in_node.is_file(): + data = in_node.get_file_data() + out_file = out_dir.create_file(name, data, meta_info, False) + out_file.flush() + in_node.flush() diff --git a/amitools/fs/RootMetaInfo.py b/amitools/fs/RootMetaInfo.py index c2411bfd..492be35d 100644 --- a/amitools/fs/RootMetaInfo.py +++ b/amitools/fs/RootMetaInfo.py @@ -1,116 +1,112 @@ - - - import time from .TimeStamp import * + class RootMetaInfo: - def __init__(self, create_ts=None, disk_ts=None, mod_ts=None): - self.set_create_ts(create_ts) - self.set_disk_ts(disk_ts) - self.set_mod_ts(mod_ts) - - def __str__(self): - res = 
[] - res.append(self.get_create_time_str()) - res.append(self.get_disk_time_str()) - res.append(self.get_mod_time_str()) - return " ".join(res) - - # create_ts - def set_create_time(self, create_time): - self.create_time = create_time - if self.create_time != None: - self.create_ts = TimeStamp() - self.create_ts.from_secs(create_time) - else: - self.create_ts = None - - def set_create_ts(self, create_ts): - self.create_ts = create_ts - if self.create_ts != None: - self.create_time = self.create_ts.get_secsf() - else: - self.create_time = None - - def get_create_time(self): - return self.create_time - - def get_create_ts(self): - return self.create_ts - - def get_create_time_str(self): - if self.create_ts != None: - return str(self.create_ts) - else: - return ts_empty_string - - def set_current_as_create_time(self): - create_time = time.mktime(time.localtime()) - self.set_create_time(create_time) - - # disk_ts - def set_disk_time(self, disk_time): - self.disk_time = disk_time - if self.disk_time != None: - self.disk_ts = TimeStamp() - self.disk_ts.from_secs(disk_time) - else: - self.disk_ts = None - - def set_disk_ts(self, disk_ts): - self.disk_ts = disk_ts - if self.disk_ts != None: - self.disk_time = self.disk_ts.get_secsf() - else: - self.disk_time = None - - def get_disk_time(self): - return self.disk_time - - def get_disk_ts(self): - return self.disk_ts - - def get_disk_time_str(self): - if self.disk_ts != None: - return str(self.disk_ts) - else: - return ts_empty_string - - def set_current_as_disk_time(self): - disk_time = time.mktime(time.localtime()) - self.set_disk_time(disk_time) - - # mod_ts - def set_mod_time(self, mod_time): - self.mod_time = mod_time - if self.mod_time != None: - self.mod_ts = TimeStamp() - self.mod_ts.from_secs(mod_time) - else: - self.mod_ts = None - - def set_mod_ts(self, mod_ts): - self.mod_ts = mod_ts - if self.mod_ts != None: - self.mod_time = self.mod_ts.get_secsf() - else: - self.mod_time = None - - def get_mod_time(self): - 
return self.mod_time - - def get_mod_ts(self): - return self.mod_ts - - def get_mod_time_str(self): - if self.mod_ts != None: - return str(self.mod_ts) - else: - return ts_empty_string - - def set_current_as_mod_time(self): - mod_time = time.mktime(time.localtime()) - self.set_mod_time(mod_time) - - \ No newline at end of file + def __init__(self, create_ts=None, disk_ts=None, mod_ts=None): + self.set_create_ts(create_ts) + self.set_disk_ts(disk_ts) + self.set_mod_ts(mod_ts) + + def __str__(self): + res = [] + res.append(self.get_create_time_str()) + res.append(self.get_disk_time_str()) + res.append(self.get_mod_time_str()) + return " ".join(res) + + # create_ts + def set_create_time(self, create_time): + self.create_time = create_time + if self.create_time != None: + self.create_ts = TimeStamp() + self.create_ts.from_secs(create_time) + else: + self.create_ts = None + + def set_create_ts(self, create_ts): + self.create_ts = create_ts + if self.create_ts != None: + self.create_time = self.create_ts.get_secsf() + else: + self.create_time = None + + def get_create_time(self): + return self.create_time + + def get_create_ts(self): + return self.create_ts + + def get_create_time_str(self): + if self.create_ts != None: + return str(self.create_ts) + else: + return ts_empty_string + + def set_current_as_create_time(self): + create_time = time.mktime(time.localtime()) + self.set_create_time(create_time) + + # disk_ts + def set_disk_time(self, disk_time): + self.disk_time = disk_time + if self.disk_time != None: + self.disk_ts = TimeStamp() + self.disk_ts.from_secs(disk_time) + else: + self.disk_ts = None + + def set_disk_ts(self, disk_ts): + self.disk_ts = disk_ts + if self.disk_ts != None: + self.disk_time = self.disk_ts.get_secsf() + else: + self.disk_time = None + + def get_disk_time(self): + return self.disk_time + + def get_disk_ts(self): + return self.disk_ts + + def get_disk_time_str(self): + if self.disk_ts != None: + return str(self.disk_ts) + else: + return 
ts_empty_string + + def set_current_as_disk_time(self): + disk_time = time.mktime(time.localtime()) + self.set_disk_time(disk_time) + + # mod_ts + def set_mod_time(self, mod_time): + self.mod_time = mod_time + if self.mod_time != None: + self.mod_ts = TimeStamp() + self.mod_ts.from_secs(mod_time) + else: + self.mod_ts = None + + def set_mod_ts(self, mod_ts): + self.mod_ts = mod_ts + if self.mod_ts != None: + self.mod_time = self.mod_ts.get_secsf() + else: + self.mod_time = None + + def get_mod_time(self): + return self.mod_time + + def get_mod_ts(self): + return self.mod_ts + + def get_mod_time_str(self): + if self.mod_ts != None: + return str(self.mod_ts) + else: + return ts_empty_string + + def set_current_as_mod_time(self): + mod_time = time.mktime(time.localtime()) + self.set_mod_time(mod_time) diff --git a/amitools/fs/TimeStamp.py b/amitools/fs/TimeStamp.py index db2dc25f..7cd5daa1 100644 --- a/amitools/fs/TimeStamp.py +++ b/amitools/fs/TimeStamp.py @@ -1,6 +1,3 @@ - - - import time ts_empty_string = "--.--.---- --:--:--.--" @@ -10,78 +7,80 @@ # which is 1970, but Amiga specs say that 1978 is the base year. 
amiga_epoch = time.mktime(time.strptime("01.01.1978 00:00:00", ts_format)) + class TimeStamp: - def __init__(self, days=0, mins=0, ticks=0): - self.days = days - self.mins = mins - self.ticks = ticks - self.secs = days * 24 * 60 * 60 + mins * 60 + (ticks // 50) - self.sub_secs = (ticks % 50) - - def __str__(self): - t = time.localtime(self.secs + amiga_epoch) - ts = time.strftime(ts_format, t) - return "%s.%02d" % (ts, self.sub_secs) - - def format(self, my_format): - t = time.localtime(self.secs + amiga_epoch) - return time.strftime(my_format, t) + def __init__(self, days=0, mins=0, ticks=0): + self.days = days + self.mins = mins + self.ticks = ticks + self.secs = days * 24 * 60 * 60 + mins * 60 + (ticks // 50) + self.sub_secs = ticks % 50 + + def __str__(self): + t = time.localtime(self.secs + amiga_epoch) + ts = time.strftime(ts_format, t) + return "%s.%02d" % (ts, self.sub_secs) + + def format(self, my_format): + t = time.localtime(self.secs + amiga_epoch) + return time.strftime(my_format, t) + + def get_secsf(self): + return self.secs + self.sub_secs / 50.0 + + def get_secs(self): + return self.secs + + def get_sub_secs(self): + return self.sub_secs + + def from_secs(self, secs, sub_secs=0): + secs = int(secs - amiga_epoch) + ticks = secs * 50 + mins = ticks // (50 * 60) + self.ticks = ticks % (50 * 60) + self.days = mins // (60 * 24) + self.mins = mins % (60 * 24) + self.secs = secs + self.sub_secs = sub_secs + + def parse(self, s): + # check for ticks + s = s.strip() + ticks = 0 + if len(s) > 3: + # ticks + t = s[-3:] + # old notation ' t00' + if t[0] == "t" and t[1:].isdigit(): + ticks = int(t[1:]) + s = s[:-4] + # new notation '.00' + elif t[0] == "." 
and t[1:].isdigit(): + ticks = int(t[1:]) + s = s[:-3] + # parse normal time + try: + ts = time.strptime(s, ts_format) + secs = int(time.mktime(ts)) + self.from_secs(secs) + self.sub_secs = ticks + self.ticks += ticks + return True + except ValueError: + return False + - def get_secsf(self): - return self.secs + self.sub_secs / 50.0 - - def get_secs(self): - return self.secs +if __name__ == "__main__": + ts = TimeStamp() + ts.from_secs(123) + ts2 = TimeStamp(days=ts.days, mins=ts.mins, ticks=ts.ticks) + if ts2.get_secs() != 123: + print("FAIL") - def get_sub_secs(self): - return self.sub_secs - - def from_secs(self, secs, sub_secs=0): - secs = int(secs - amiga_epoch) - ticks = secs * 50 - mins = ticks // (50 * 60) - self.ticks = ticks % (50 * 60) - self.days = mins // (60 * 24) - self.mins = mins % (60 * 24) - self.secs = secs - self.sub_secs = sub_secs - - def parse(self, s): - # check for ticks - s = s.strip() - ticks = 0 - if len(s) > 3: - # ticks - t = s[-3:] - # old notation ' t00' - if t[0] == 't' and t[1:].isdigit(): - ticks = int(t[1:]) - s = s[:-4] - # new notation '.00' - elif t[0] == '.' 
and t[1:].isdigit(): - ticks = int(t[1:]) - s = s[:-3] - # parse normal time - try: - ts = time.strptime(s, ts_format) - secs = int(time.mktime(ts)) - self.from_secs(secs) - self.sub_secs = ticks - self.ticks += ticks - return True - except ValueError: - return False - -if __name__ == '__main__': - ts = TimeStamp() - ts.from_secs(123) - ts2 = TimeStamp(days=ts.days, mins=ts.mins, ticks=ts.ticks) - if ts2.get_secs() != 123: - print("FAIL") - - ts = TimeStamp() - s = "05.01.2012 21:47:34 t40" - ts.parse(s) - txt = str(ts) - if s != txt: - print("FAIL") + ts = TimeStamp() + s = "05.01.2012 21:47:34 t40" + ts.parse(s) + txt = str(ts) + if s != txt: + print("FAIL") diff --git a/amitools/fs/blkdev/ADFBlockDevice.py b/amitools/fs/blkdev/ADFBlockDevice.py index 27f34342..005085e8 100644 --- a/amitools/fs/blkdev/ADFBlockDevice.py +++ b/amitools/fs/blkdev/ADFBlockDevice.py @@ -1,122 +1,132 @@ - - - from .BlockDevice import BlockDevice import ctypes import gzip import io -class ADFBlockDevice(BlockDevice): - def __init__(self, adf_file, read_only=False, fobj=None): - self.adf_file = adf_file - self.read_only = read_only - self.fobj = fobj - self.dirty = False - lo = adf_file.lower() - self.gzipped = lo.endswith('.adz') or lo.endswith('.adf.gz') - def create(self): - if self.read_only: - raise IOError("ADF creation not allowed in read-only mode!") - self._set_geometry() # set default geometry - # allocate image in memory - self.data = ctypes.create_string_buffer(self.num_bytes) - self.dirty = True +class ADFBlockDevice(BlockDevice): + def __init__(self, adf_file, read_only=False, fobj=None): + self.adf_file = adf_file + self.read_only = read_only + self.fobj = fobj + self.dirty = False + lo = adf_file.lower() + self.gzipped = lo.endswith(".adz") or lo.endswith(".adf.gz") - def open(self): - self._set_geometry() # set default geometry - close = True - # open adf file via fobj - if self.fobj is not None: - if self.gzipped: - fh = gzip.GzipFile(self.adf_file, "rb", 
fileobj=self.fobj) - else: - fh = self.fobj - close = False - # open adf file - else: - if self.gzipped: - fh = gzip.open(self.adf_file,"rb") - else: - fh = io.open(self.adf_file, "rb") - # read image - data = fh.read(self.num_bytes) - # close input file - if close: - fh.close() - # check size - if len(data) != self.num_bytes: - raise IOError("Invalid ADF Size: got %d but expected %d" % (len(data), self.num_bytes)) - # create modifyable data - if self.read_only: - self.data = data - else: - self.data = ctypes.create_string_buffer(self.num_bytes) - self.data[:] = data + def create(self): + if self.read_only: + raise IOError("ADF creation not allowed in read-only mode!") + self._set_geometry() # set default geometry + # allocate image in memory + self.data = ctypes.create_string_buffer(self.num_bytes) + self.dirty = True - def flush(self): - # write dirty adf - if self.dirty and not self.read_only: - close = True - if self.fobj is not None: - # seek fobj to beginning - self.fobj.seek(0,0) - if self.gzipped: - fh = gzip.GzipFile(self.adf_file, "wb", fileobj=self.fobj) + def open(self): + self._set_geometry() # set default geometry + close = True + # open adf file via fobj + if self.fobj is not None: + if self.gzipped: + fh = gzip.GzipFile(self.adf_file, "rb", fileobj=self.fobj) + else: + fh = self.fobj + close = False + # open adf file else: - fh = self.fobj - close = False - else: - if self.gzipped: - fh = gzip.open(self.adf_file,"wb") + if self.gzipped: + fh = gzip.open(self.adf_file, "rb") + else: + fh = io.open(self.adf_file, "rb") + # read image + data = fh.read(self.num_bytes) + # close input file + if close: + fh.close() + # check size + if len(data) != self.num_bytes: + raise IOError( + "Invalid ADF Size: got %d but expected %d" % (len(data), self.num_bytes) + ) + # create modifyable data + if self.read_only: + self.data = data else: - fh = io.open(self.adf_file, "wb") - # write image - fh.write(self.data) - # close file - if close: - fh.close() - self.dirty = 
False + self.data = ctypes.create_string_buffer(self.num_bytes) + self.data[:] = data + + def flush(self): + # write dirty adf + if self.dirty and not self.read_only: + close = True + if self.fobj is not None: + # seek fobj to beginning + self.fobj.seek(0, 0) + if self.gzipped: + fh = gzip.GzipFile(self.adf_file, "wb", fileobj=self.fobj) + else: + fh = self.fobj + close = False + else: + if self.gzipped: + fh = gzip.open(self.adf_file, "wb") + else: + fh = io.open(self.adf_file, "wb") + # write image + fh.write(self.data) + # close file + if close: + fh.close() + self.dirty = False - def close(self): - self.flush() - self.data = None - # now close fobj - if self.fobj is not None: - self.fobj.close() + def close(self): + self.flush() + self.data = None + # now close fobj + if self.fobj is not None: + self.fobj.close() - def read_block(self, blk_num): - if blk_num >= self.num_blocks: - raise ValueError("Invalid ADF block num: got %d but max is %d" % (blk_num, self.num_blocks)) - off = self._blk_to_offset(blk_num) - return self.data[off:off+self.block_bytes] + def read_block(self, blk_num): + if blk_num >= self.num_blocks: + raise ValueError( + "Invalid ADF block num: got %d but max is %d" + % (blk_num, self.num_blocks) + ) + off = self._blk_to_offset(blk_num) + return self.data[off : off + self.block_bytes] - def write_block(self, blk_num, data): - if self.read_only: - raise IOError("ADF File is read-only!") - if blk_num >= self.num_blocks: - raise ValueError("Invalid ADF block num: got %d but max is %d" % (blk_num, self.num_blocks)) - if len(data) != self.block_bytes: - raise ValueError("Invalid ADF block size written: got %d but size is %d" % (len(data), self.block_bytes)) - off = self._blk_to_offset(blk_num) - self.data[off:off+self.block_bytes] = data - self.dirty = True + def write_block(self, blk_num, data): + if self.read_only: + raise IOError("ADF File is read-only!") + if blk_num >= self.num_blocks: + raise ValueError( + "Invalid ADF block num: got %d but 
max is %d" + % (blk_num, self.num_blocks) + ) + if len(data) != self.block_bytes: + raise ValueError( + "Invalid ADF block size written: got %d but size is %d" + % (len(data), self.block_bytes) + ) + off = self._blk_to_offset(blk_num) + self.data[off : off + self.block_bytes] = data + self.dirty = True # --- mini test --- -if __name__ == '__main__': - import sys - for a in sys.argv[1:]: - # write to file device - adf = ADFBlockDevice(a) - adf.open() - d = adf.read_block(0) - adf.write_block(0, d) - adf.close() - # write via fobj - fobj = open(a, "rb") - adf = ADFBlockDevice(a, fobj=fobj) - adf.open() - d = adf.read_block(0) - adf.write_block(0, d) - adf.close() +if __name__ == "__main__": + import sys + + for a in sys.argv[1:]: + # write to file device + adf = ADFBlockDevice(a) + adf.open() + d = adf.read_block(0) + adf.write_block(0, d) + adf.close() + # write via fobj + fobj = open(a, "rb") + adf = ADFBlockDevice(a, fobj=fobj) + adf.open() + d = adf.read_block(0) + adf.write_block(0, d) + adf.close() diff --git a/amitools/fs/blkdev/BlkDevFactory.py b/amitools/fs/blkdev/BlkDevFactory.py index a0a96af0..80ce7741 100644 --- a/amitools/fs/blkdev/BlkDevFactory.py +++ b/amitools/fs/blkdev/BlkDevFactory.py @@ -1,6 +1,3 @@ - - - import os import os.path import stat @@ -12,197 +9,200 @@ from amitools.fs.rdb.RDisk import RDisk import amitools.util.BlkDevTools as BlkDevTools + class BlkDevFactory: - """the block device factory opens or creates image files suitable as a block device for file system access.""" + """the block device factory opens or creates image files suitable as a block device for file system access.""" - valid_extensions = ('.adf','.adz','.adf.gz','.hdf','.rdisk') + valid_extensions = (".adf", ".adz", ".adf.gz", ".hdf", ".rdisk") - TYPE_ADF = 1 - TYPE_HDF = 2 - TYPE_RDISK = 3 + TYPE_ADF = 1 + TYPE_HDF = 2 + TYPE_RDISK = 3 - def detect_type(self, img_file, fobj, options=None): - """try to detect the type of a given img_file name""" - # 1. 
take type from options - t = self.type_from_options(options) - if t == None: - # 2. look in file - t = self.type_from_contents(img_file, fobj) - if t == None: - # 3. from extension - t = self.type_from_extension(img_file) - return t + def detect_type(self, img_file, fobj, options=None): + """try to detect the type of a given img_file name""" + # 1. take type from options + t = self.type_from_options(options) + if t == None: + # 2. look in file + t = self.type_from_contents(img_file, fobj) + if t == None: + # 3. from extension + t = self.type_from_extension(img_file) + return t - def type_from_options(self, options): - """look in options for type""" - if options != None: - if 'type' in options: - t = options['type'].lower() - if t in ('adf','adz'): - return self.TYPE_ADF - elif t == 'hdf': - return self.TYPE_HDF - elif t == 'rdisk': - return self.TYPE_RDISK - return None + def type_from_options(self, options): + """look in options for type""" + if options != None: + if "type" in options: + t = options["type"].lower() + if t in ("adf", "adz"): + return self.TYPE_ADF + elif t == "hdf": + return self.TYPE_HDF + elif t == "rdisk": + return self.TYPE_RDISK + return None - def type_from_contents(self, img_file, fobj): - """look in first 4 bytes for type of image""" - # load 4 bytes - if fobj is None: - # make sure file exists - if not os.path.exists(img_file): + def type_from_contents(self, img_file, fobj): + """look in first 4 bytes for type of image""" + # load 4 bytes + if fobj is None: + # make sure file exists + if not os.path.exists(img_file): + return None + f = open(img_file, "rb") + hdr = f.read(4) + f.close() + else: + hdr = fobj.read(4) + fobj.seek(0, 0) + # check for 'RDISK': + if hdr == b"RDSK": + return self.TYPE_RDISK return None - f = open(img_file, "rb") - hdr = f.read(4) - f.close() - else: - hdr = fobj.read(4) - fobj.seek(0,0) - # check for 'RDISK': - if hdr == b'RDSK': - return self.TYPE_RDISK - return None - def type_from_extension(self, img_file): - 
"""look at file extension for type of image""" - ext = img_file.lower() - if ext.endswith('.adf') or ext.endswith('.adz') or ext.endswith('.adf.gz'): - return self.TYPE_ADF - elif ext.endswith(".hdf"): - return self.TYPE_HDF - elif ext.endswith(".rdsk"): - return self.TYPE_RDISK - else: - return None + def type_from_extension(self, img_file): + """look at file extension for type of image""" + ext = img_file.lower() + if ext.endswith(".adf") or ext.endswith(".adz") or ext.endswith(".adf.gz"): + return self.TYPE_ADF + elif ext.endswith(".hdf"): + return self.TYPE_HDF + elif ext.endswith(".rdsk"): + return self.TYPE_RDISK + else: + return None - def _get_block_size(self, options): - if options and 'bs' in options: - bs = int(options['bs']) - if bs % 512 != 0 and bs < 512: - raise ValueError("invalid block size given: %d" % bs) - return bs - else: - return 512 + def _get_block_size(self, options): + if options and "bs" in options: + bs = int(options["bs"]) + if bs % 512 != 0 and bs < 512: + raise ValueError("invalid block size given: %d" % bs) + return bs + else: + return 512 - def open(self, img_file, read_only=False, options=None, fobj=None, - none_if_missing=False): - """open an existing image file""" - # file base check - if fobj is None: - # make sure image file exists - if not os.path.exists(img_file): - if none_if_missing: - return None - raise IOError("image file not found") - # is readable? - if not os.access(img_file, os.R_OK): - raise IOError("can't read from image file") - # is writeable? 
-> no: enforce read_only - if not os.access(img_file, os.W_OK): - read_only = True - # check size - st = os.stat(img_file) - mode = st.st_mode - if stat.S_ISBLK(mode) or stat.S_ISCHR(mode): - size = BlkDevTools.getblkdevsize(img_file) - else: - size = os.path.getsize(img_file) - if size == 0: - raise IOError("image file is empty") - # fobj - else: - fobj.seek(0,2) - size = fobj.tell() - fobj.seek(0,0) - # detect type - t = self.detect_type(img_file, fobj, options) - if t == None: - raise IOError("can't detect type of image file") - # get block size - bs = self._get_block_size(options) - # create blkdev - if t == self.TYPE_ADF: - blkdev = ADFBlockDevice(img_file, read_only, fobj=fobj) - blkdev.open() - elif t == self.TYPE_HDF: - # detect geometry - geo = DiskGeometry(block_bytes=bs) - if not geo.detect(size, options): - raise IOError("can't detect geometry of HDF image file") - blkdev = HDFBlockDevice(img_file, read_only, fobj=fobj, block_size=bs) - blkdev.open(geo) - else: - rawdev = RawBlockDevice(img_file, read_only, fobj=fobj, block_bytes=bs) - rawdev.open() - # check block size stored in rdb - rdisk = RDisk(rawdev) - rdb_bs = rdisk.peek_block_size() - if rdb_bs != bs: - # adjust block size and re-open - rawdev.close() - bs = rdb_bs - rawdev = RawBlockDevice(img_file, read_only, fobj=fobj, block_bytes=bs) - rawdev.open() - rdisk = RDisk(rawdev) - if not rdisk.open(): - raise IOError("can't open rdisk of image file") - # determine partition - p = "0" - if options != None and 'part' in options: - p = str(options['part']) - part = rdisk.find_partition_by_string(p) - if part == None: - raise IOError("can't find partition in image file") - blkdev = part.create_blkdev(True) # auto_close rdisk - blkdev.open() - return blkdev + def open( + self, img_file, read_only=False, options=None, fobj=None, none_if_missing=False + ): + """open an existing image file""" + # file base check + if fobj is None: + # make sure image file exists + if not os.path.exists(img_file): + if 
none_if_missing: + return None + raise IOError("image file not found") + # is readable? + if not os.access(img_file, os.R_OK): + raise IOError("can't read from image file") + # is writeable? -> no: enforce read_only + if not os.access(img_file, os.W_OK): + read_only = True + # check size + st = os.stat(img_file) + mode = st.st_mode + if stat.S_ISBLK(mode) or stat.S_ISCHR(mode): + size = BlkDevTools.getblkdevsize(img_file) + else: + size = os.path.getsize(img_file) + if size == 0: + raise IOError("image file is empty") + # fobj + else: + fobj.seek(0, 2) + size = fobj.tell() + fobj.seek(0, 0) + # detect type + t = self.detect_type(img_file, fobj, options) + if t == None: + raise IOError("can't detect type of image file") + # get block size + bs = self._get_block_size(options) + # create blkdev + if t == self.TYPE_ADF: + blkdev = ADFBlockDevice(img_file, read_only, fobj=fobj) + blkdev.open() + elif t == self.TYPE_HDF: + # detect geometry + geo = DiskGeometry(block_bytes=bs) + if not geo.detect(size, options): + raise IOError("can't detect geometry of HDF image file") + blkdev = HDFBlockDevice(img_file, read_only, fobj=fobj, block_size=bs) + blkdev.open(geo) + else: + rawdev = RawBlockDevice(img_file, read_only, fobj=fobj, block_bytes=bs) + rawdev.open() + # check block size stored in rdb + rdisk = RDisk(rawdev) + rdb_bs = rdisk.peek_block_size() + if rdb_bs != bs: + # adjust block size and re-open + rawdev.close() + bs = rdb_bs + rawdev = RawBlockDevice(img_file, read_only, fobj=fobj, block_bytes=bs) + rawdev.open() + rdisk = RDisk(rawdev) + if not rdisk.open(): + raise IOError("can't open rdisk of image file") + # determine partition + p = "0" + if options != None and "part" in options: + p = str(options["part"]) + part = rdisk.find_partition_by_string(p) + if part == None: + raise IOError("can't find partition in image file") + blkdev = part.create_blkdev(True) # auto_close rdisk + blkdev.open() + return blkdev - def create(self, img_file, force=True, options=None, 
fobj=None): - if fobj is None: - # make sure we are allowed to overwrite existing file - if os.path.exists(img_file): - if not force: - raise IOError("can't overwrite existing image file") - # not writeable? - if not os.access(img_file, os.W_OK): - raise IOError("can't write image file") - # detect type - t = self.detect_type(img_file, fobj, options) - if t == None: - raise IOError("can't detect type of image file") - if t == self.TYPE_RDISK: - raise IOError("can't create rdisk. use rdbtool first") - # get block size - bs = self._get_block_size(options) - # create blkdev - if t == self.TYPE_ADF: - blkdev = ADFBlockDevice(img_file, fobj=fobj) - blkdev.create() - else: - # determine geometry from size or chs - geo = DiskGeometry() - if not geo.setup(options): - raise IOError("can't determine geometry of HDF image file") - blkdev = HDFBlockDevice(img_file, fobj=fobj, block_size=bs) - blkdev.create(geo) - return blkdev + def create(self, img_file, force=True, options=None, fobj=None): + if fobj is None: + # make sure we are allowed to overwrite existing file + if os.path.exists(img_file): + if not force: + raise IOError("can't overwrite existing image file") + # not writeable? + if not os.access(img_file, os.W_OK): + raise IOError("can't write image file") + # detect type + t = self.detect_type(img_file, fobj, options) + if t == None: + raise IOError("can't detect type of image file") + if t == self.TYPE_RDISK: + raise IOError("can't create rdisk. 
use rdbtool first") + # get block size + bs = self._get_block_size(options) + # create blkdev + if t == self.TYPE_ADF: + blkdev = ADFBlockDevice(img_file, fobj=fobj) + blkdev.create() + else: + # determine geometry from size or chs + geo = DiskGeometry() + if not geo.setup(options): + raise IOError("can't determine geometry of HDF image file") + blkdev = HDFBlockDevice(img_file, fobj=fobj, block_size=bs) + blkdev.create(geo) + return blkdev # --- mini test --- -if __name__ == '__main__': - import sys - import io - bdf = BlkDevFactory() - for a in sys.argv[1:]: - # open by file - blkdev = bdf.open(a) - print(a, blkdev.__class__.__name__) - blkdev.close() - # open via fobj - fobj = open(a,"rb") - data = fobj.read() - nobj = io.StringIO(data) - blkdev = bdf.open("bluna"+a, fobj=nobj) - print(a, blkdev.__class__.__name__) - blkdev.close() +if __name__ == "__main__": + import sys + import io + + bdf = BlkDevFactory() + for a in sys.argv[1:]: + # open by file + blkdev = bdf.open(a) + print(a, blkdev.__class__.__name__) + blkdev.close() + # open via fobj + fobj = open(a, "rb") + data = fobj.read() + nobj = io.StringIO(data) + blkdev = bdf.open("bluna" + a, fobj=nobj) + print(a, blkdev.__class__.__name__) + blkdev.close() diff --git a/amitools/fs/blkdev/BlockDevice.py b/amitools/fs/blkdev/BlockDevice.py index 57b6e85d..02f08c78 100644 --- a/amitools/fs/blkdev/BlockDevice.py +++ b/amitools/fs/blkdev/BlockDevice.py @@ -1,54 +1,65 @@ - - - # a block device defines a set of blocks used by a file system from .DiskGeometry import DiskGeometry + class BlockDevice: - def _set_geometry(self, cyls=80, heads=2, sectors=11, block_bytes=512, reserved=2, bootblocks=2): - self.cyls = cyls - self.heads = heads - self.sectors = sectors - self.block_bytes = block_bytes - self.reserved = reserved - self.bootblocks = bootblocks - # derived values - self.num_tracks = self.cyls * self.heads - self.num_blocks = self.num_tracks * self.sectors - self.num_bytes = self.num_blocks * self.block_bytes 
- self.block_longs = self.block_bytes // 4 - self.num_longs = self.num_blocks * self.block_longs - - def dump(self): - print("cylinders: ", self.cyls) - print("heads: ", self.heads) - print("sectors: ", self.sectors) - print("block_bytes:", self.block_bytes) - print("reserved: ", self.reserved) - print("bootblocks: ", self.bootblocks) - - def _blk_to_offset(self, blk_num): - return self.block_bytes * blk_num - - # ----- API ----- - def create(self, **args): - pass - def open(self): - pass - def close(self): - pass - def flush(self): - pass - def read_block(self, blk_num): - pass - def write_block(self, blk_num, data): - pass - def get_geometry(self): - return DiskGeometry(self.cyls, self.heads, self.sectors) - def get_chs_str(self): - return "chs=%d,%d,%d" % (self.cyls, self.heads, self.sectors) - def get_options(self): - return { 'chs' : "%d,%d,%d" % (self.cyls, self.heads, self.sectors), - 'bs' : self.block_bytes } - def get_block_size_str(self): - return "bs=%d" % self.block_bytes + def _set_geometry( + self, cyls=80, heads=2, sectors=11, block_bytes=512, reserved=2, bootblocks=2 + ): + self.cyls = cyls + self.heads = heads + self.sectors = sectors + self.block_bytes = block_bytes + self.reserved = reserved + self.bootblocks = bootblocks + # derived values + self.num_tracks = self.cyls * self.heads + self.num_blocks = self.num_tracks * self.sectors + self.num_bytes = self.num_blocks * self.block_bytes + self.block_longs = self.block_bytes // 4 + self.num_longs = self.num_blocks * self.block_longs + + def dump(self): + print("cylinders: ", self.cyls) + print("heads: ", self.heads) + print("sectors: ", self.sectors) + print("block_bytes:", self.block_bytes) + print("reserved: ", self.reserved) + print("bootblocks: ", self.bootblocks) + + def _blk_to_offset(self, blk_num): + return self.block_bytes * blk_num + + # ----- API ----- + def create(self, **args): + pass + + def open(self): + pass + + def close(self): + pass + + def flush(self): + pass + + def 
read_block(self, blk_num): + pass + + def write_block(self, blk_num, data): + pass + + def get_geometry(self): + return DiskGeometry(self.cyls, self.heads, self.sectors) + + def get_chs_str(self): + return "chs=%d,%d,%d" % (self.cyls, self.heads, self.sectors) + + def get_options(self): + return { + "chs": "%d,%d,%d" % (self.cyls, self.heads, self.sectors), + "bs": self.block_bytes, + } + + def get_block_size_str(self): + return "bs=%d" % self.block_bytes diff --git a/amitools/fs/blkdev/DiskGeometry.py b/amitools/fs/blkdev/DiskGeometry.py index 20a164af..a4449e3c 100644 --- a/amitools/fs/blkdev/DiskGeometry.py +++ b/amitools/fs/blkdev/DiskGeometry.py @@ -1,182 +1,189 @@ import amitools.util.ByteSize as ByteSize + class DiskGeometry: - def __init__(self, cyls=0, heads=0, secs=0, block_bytes=512): - self.cyls = cyls - self.heads = heads - self.secs = secs - self.block_bytes = block_bytes - - def __str__(self): - size = self.get_num_bytes() - return "chs=%d,%d,%d bs=%d size=%d/%s" % (self.cyls, self.heads, self.secs, - self.block_bytes, size, ByteSize.to_byte_size_str(size)) - - def get_num_blocks(self): - """return the number of block allocated by geometry""" - return self.cyls * self.heads * self.secs - - def get_num_bytes(self): - """return the number of bytes allocated by geometry""" - return self.get_num_blocks() * self.block_bytes - - def _update_block_size(self, options): - if options and 'bs' in options: - bs = int(options['bs']) - if bs % 512 != 0 or bs < 512: - raise ValueError("invalid block size given: %d" % bs) - self.block_bytes = bs + def __init__(self, cyls=0, heads=0, secs=0, block_bytes=512): + self.cyls = cyls + self.heads = heads + self.secs = secs + self.block_bytes = block_bytes + + def __str__(self): + size = self.get_num_bytes() + return "chs=%d,%d,%d bs=%d size=%d/%s" % ( + self.cyls, + self.heads, + self.secs, + self.block_bytes, + size, + ByteSize.to_byte_size_str(size), + ) + + def get_num_blocks(self): + """return the number of block 
allocated by geometry""" + return self.cyls * self.heads * self.secs - def detect(self, byte_size, options=None): - """detect a geometry from a given image size and optional options. + def get_num_bytes(self): + """return the number of bytes allocated by geometry""" + return self.get_num_blocks() * self.block_bytes + + def _update_block_size(self, options): + if options and "bs" in options: + bs = int(options["bs"]) + if bs % 512 != 0 or bs < 512: + raise ValueError("invalid block size given: %d" % bs) + self.block_bytes = bs + + def detect(self, byte_size, options=None): + """detect a geometry from a given image size and optional options. return bytes required by geometry or None if geomtry is invalid """ - c = None - h = None - s = None - self._update_block_size(options) - num_blocks = byte_size // self.block_bytes - algo = None - if options != None: - (c, h, s) = self._parse_chs(options) - if 'algo' in options: - algo = int(options['algo']) - # chs if fully specified then take this one - if c != None and h != None and s != None: - self.cyls = c - self.heads = h - self.secs = s - size = self.get_num_bytes() - if size == byte_size: - return size - else: - return None - else: - return self._guess_for_size(byte_size, algo=algo, secs=s, heads=h) - - def setup(self, options): - """setup a new geometry by giving options + c = None + h = None + s = None + self._update_block_size(options) + num_blocks = byte_size // self.block_bytes + algo = None + if options != None: + (c, h, s) = self._parse_chs(options) + if "algo" in options: + algo = int(options["algo"]) + # chs if fully specified then take this one + if c != None and h != None and s != None: + self.cyls = c + self.heads = h + self.secs = s + size = self.get_num_bytes() + if size == byte_size: + return size + else: + return None + else: + return self._guess_for_size(byte_size, algo=algo, secs=s, heads=h) + + def setup(self, options): + """setup a new geometry by giving options return bytes required by geometry or 
None if geometry is invalid """ - if options == None: - return False - c = None - h = None - s = None - (c, h, s) = self._parse_chs(options) - self._update_block_size(options) - # chs is fully specified - if c != None and h != None and s != None: - self.cyls = c - self.heads = h - self.secs = s - return self.get_num_bytes() - else: - # we require a size - if 'size' not in options: - return None - # parse size - size = options['size'] - if type(size) != int: - size = ByteSize.parse_byte_size_str(size) - if size == None: - return None - # select guess algo - algo = None - if 'algo' in options: - algo = int(options['algo']) - # guess size - return self._guess_for_size(size, approx=True, algo=algo, secs=s, heads=h) + if options == None: + return False + c = None + h = None + s = None + (c, h, s) = self._parse_chs(options) + self._update_block_size(options) + # chs is fully specified + if c != None and h != None and s != None: + self.cyls = c + self.heads = h + self.secs = s + return self.get_num_bytes() + else: + # we require a size + if "size" not in options: + return None + # parse size + size = options["size"] + if type(size) != int: + size = ByteSize.parse_byte_size_str(size) + if size == None: + return None + # select guess algo + algo = None + if "algo" in options: + algo = int(options["algo"]) + # guess size + return self._guess_for_size(size, approx=True, algo=algo, secs=s, heads=h) + + def _parse_chs(self, options): + c = None + h = None + s = None + # chs=,, + if "chs" in options: + comp = options["chs"].split(",") + if len(comp) == 3: + return [int(x) for x in comp] + else: + if "s" in options: + s = int(options["s"]) + if "h" in options: + h = int(options["h"]) + if "c" in options: + c = int(options["c"]) + return (c, h, s) - def _parse_chs(self, options): - c = None - h = None - s = None - # chs=,, - if 'chs' in options: - comp = options['chs'].split(',') - if len(comp) == 3: - return [int(x) for x in comp] - else: - if 's' in options: - s = 
int(options['s']) - if 'h' in options: - h = int(options['h']) - if 'c' in options: - c = int(options['c']) - return (c,h,s) + def _guess_for_size1(self, size, approx=True, secs=None, heads=None): + mb = size // 1024 + if secs == None: + secs = 63 + if heads == None: + if mb <= 504 * 1024: + heads = 16 + elif mb <= 1008 * 1024: + heads = 32 + elif mb <= 2016 * 1024: + heads = 64 + elif mb <= 4032 * 1024: + heads = 128 + else: + heads = 256 + cyls = (size // self.block_bytes) // (secs * heads) + geo_size = cyls * secs * heads * self.block_bytes + # keep approx values or match + if approx or geo_size == size: + self.cyls = cyls + self.heads = heads + self.secs = secs + return geo_size + else: + return None - def _guess_for_size1(self, size, approx=True, secs=None, heads=None): - mb = size // 1024 - if secs == None: - secs = 63 - if heads == None: - if mb <= 504 * 1024: - heads = 16 - elif mb <= 1008 * 1024: - heads = 32 - elif mb <= 2016 * 1024: - heads = 64 - elif mb <= 4032 * 1024: - heads = 128 - else: - heads = 256 - cyls = (size // self.block_bytes) // (secs * heads) - geo_size = cyls * secs * heads * self.block_bytes - # keep approx values or match - if approx or geo_size == size: - self.cyls = cyls - self.heads = heads - self.secs = secs - return geo_size - else: - return None + def _guess_for_size2(self, size, approx=True, secs=None, heads=None): + if heads == None: + heads = 1 + if secs == None: + secs = 32 + cyls = (size // self.block_bytes) // (secs * heads) + # keep cyls low + while cyls > 65535: + cyls //= 2 + heads *= 2 + # keep approx values or match + geo_size = cyls * secs * heads * self.block_bytes + if approx or geo_size == size: + self.cyls = cyls + self.heads = heads + self.secs = secs + return geo_size + else: + return None - def _guess_for_size2(self, size, approx=True, secs=None, heads=None): - if heads == None: - heads = 1 - if secs == None: - secs = 32 - cyls = (size // self.block_bytes) // (secs * heads) - # keep cyls low - while cyls > 
65535: - cyls //= 2 - heads *= 2 - # keep approx values or match - geo_size = cyls * secs * heads * self.block_bytes - if approx or geo_size == size: - self.cyls = cyls - self.heads = heads - self.secs = secs - return geo_size - else: - return None - - def _guess_for_size(self, size, approx=True, algo=None, secs=None, heads=None): - if algo == 1: - return self._guess_for_size1(size, approx, secs, heads) - elif algo == 2: - return self._guess_for_size2(size, approx, secs, heads) - else: - algos = [self._guess_for_size1, self._guess_for_size2] - if approx: - # find min diff to real size - min_diff = size - min_algo = None - for a in algos: - s = a(size, True, secs, heads) - if s != None: - delta = abs(size - s) - if delta < min_diff: - min_diff = delta - min_algo = a - if min_algo != None: - return min_algo(size, True, secs, heads) + def _guess_for_size(self, size, approx=True, algo=None, secs=None, heads=None): + if algo == 1: + return self._guess_for_size1(size, approx, secs, heads) + elif algo == 2: + return self._guess_for_size2(size, approx, secs, heads) else: - return None - else: # exact match - for a in algos: - s = a(size, True, secs, heads) - if s == size: - return size - return None + algos = [self._guess_for_size1, self._guess_for_size2] + if approx: + # find min diff to real size + min_diff = size + min_algo = None + for a in algos: + s = a(size, True, secs, heads) + if s != None: + delta = abs(size - s) + if delta < min_diff: + min_diff = delta + min_algo = a + if min_algo != None: + return min_algo(size, True, secs, heads) + else: + return None + else: # exact match + for a in algos: + s = a(size, True, secs, heads) + if s == size: + return size + return None diff --git a/amitools/fs/blkdev/HDFBlockDevice.py b/amitools/fs/blkdev/HDFBlockDevice.py index 35e67f47..72193562 100644 --- a/amitools/fs/blkdev/HDFBlockDevice.py +++ b/amitools/fs/blkdev/HDFBlockDevice.py @@ -1,35 +1,43 @@ - - - from .BlockDevice import BlockDevice from .DiskGeometry import 
DiskGeometry from .ImageFile import ImageFile import os.path import os -class HDFBlockDevice(BlockDevice): - def __init__(self, hdf_file, read_only=False, block_size=512, fobj=None): - self.img_file = ImageFile(hdf_file, read_only, block_size, fobj) - - def create(self, geo, reserved=2): - self._set_geometry(geo.cyls, geo.heads, geo.secs, reserved=reserved, - block_bytes=self.img_file.block_bytes) - self.img_file.create(geo.get_num_blocks()) - self.img_file.open() - def open(self, geo, reserved=2): - self._set_geometry(geo.cyls, geo.heads, geo.secs, reserved=reserved, - block_bytes=self.img_file.block_bytes) - self.img_file.open() - - def flush(self): - pass - - def close(self): - self.img_file.close() - - def read_block(self, blk_num): - return self.img_file.read_blk(blk_num) - - def write_block(self, blk_num, data): - return self.img_file.write_blk(blk_num, data) +class HDFBlockDevice(BlockDevice): + def __init__(self, hdf_file, read_only=False, block_size=512, fobj=None): + self.img_file = ImageFile(hdf_file, read_only, block_size, fobj) + + def create(self, geo, reserved=2): + self._set_geometry( + geo.cyls, + geo.heads, + geo.secs, + reserved=reserved, + block_bytes=self.img_file.block_bytes, + ) + self.img_file.create(geo.get_num_blocks()) + self.img_file.open() + + def open(self, geo, reserved=2): + self._set_geometry( + geo.cyls, + geo.heads, + geo.secs, + reserved=reserved, + block_bytes=self.img_file.block_bytes, + ) + self.img_file.open() + + def flush(self): + pass + + def close(self): + self.img_file.close() + + def read_block(self, blk_num): + return self.img_file.read_blk(blk_num) + + def write_block(self, blk_num, data): + return self.img_file.write_blk(blk_num, data) diff --git a/amitools/fs/blkdev/ImageFile.py b/amitools/fs/blkdev/ImageFile.py index 56a0ff58..95e897b4 100644 --- a/amitools/fs/blkdev/ImageFile.py +++ b/amitools/fs/blkdev/ImageFile.py @@ -4,109 +4,121 @@ import zlib import io + class ImageFile: - def __init__(self, file_name, 
read_only=False, block_bytes=512, fobj=None): - self.file_name = file_name - self.read_only = read_only - self.block_bytes = block_bytes - self.fobj = fobj - self.fh = None - self.size = 0 - self.num_blocks = 0 + def __init__(self, file_name, read_only=False, block_bytes=512, fobj=None): + self.file_name = file_name + self.read_only = read_only + self.block_bytes = block_bytes + self.fobj = fobj + self.fh = None + self.size = 0 + self.num_blocks = 0 + + def open(self): + # file obj? + if self.fobj is not None: + self.fh = self.fobj + # get size via seek + self.fobj.seek(0, 2) # end of file + self.size = self.fobj.tell() + self.fobj.seek(0, 0) # return to begin + self.num_blocks = self.size // self.block_bytes + # file name given + else: + # is readable? + if not os.access(self.file_name, os.R_OK): + raise IOError("Can't read from image file") + # is writeable? + if not os.access(self.file_name, os.W_OK): + self.read_only = True + # is it a block/char device? + st = os.stat(self.file_name) + mode = st.st_mode + if stat.S_ISBLK(mode) or stat.S_ISCHR(mode): + self.size = BlkDevTools.getblkdevsize(self.file_name) + else: + # get size and make sure its not empty + self.size = os.path.getsize(self.file_name) + if self.size == 0: + raise IOError("Empty image file detected!") + self.num_blocks = self.size // self.block_bytes + # open raw file + if self.read_only: + flags = "rb" + else: + flags = "r+b" + self.fh = io.open(self.file_name, flags) - def open(self): - # file obj? - if self.fobj is not None: - self.fh = self.fobj - # get size via seek - self.fobj.seek(0,2) # end of file - self.size = self.fobj.tell() - self.fobj.seek(0,0) # return to begin - self.num_blocks = self.size // self.block_bytes - # file name given - else: - # is readable? - if not os.access(self.file_name, os.R_OK): - raise IOError("Can't read from image file") - # is writeable? - if not os.access(self.file_name, os.W_OK): - self.read_only = True - # is it a block/char device? 
- st = os.stat(self.file_name) - mode = st.st_mode - if stat.S_ISBLK(mode) or stat.S_ISCHR(mode): - self.size = BlkDevTools.getblkdevsize(self.file_name) - else: - # get size and make sure its not empty - self.size = os.path.getsize(self.file_name) - if self.size == 0: - raise IOError("Empty image file detected!") - self.num_blocks = self.size // self.block_bytes - # open raw file - if self.read_only: - flags = "rb" - else: - flags = "r+b" - self.fh = io.open(self.file_name, flags) + def read_blk(self, blk_num, num_blks=1): + if blk_num >= self.num_blocks: + raise IOError( + "Invalid image file block num: got %d but max is %d" + % (blk_num, self.num_blocks) + ) + off = blk_num * self.block_bytes + if off != self.fh.tell(): + self.fh.seek(off, os.SEEK_SET) + num = self.block_bytes * num_blks + data = self.fh.read(num) + return data - def read_blk(self, blk_num, num_blks=1): - if blk_num >= self.num_blocks: - raise IOError("Invalid image file block num: got %d but max is %d" % (blk_num, self.num_blocks)) - off = blk_num * self.block_bytes - if off != self.fh.tell(): - self.fh.seek(off, os.SEEK_SET) - num = self.block_bytes * num_blks - data = self.fh.read(num) - return data + def write_blk(self, blk_num, data, num_blks=1): + if self.read_only: + raise IOError("Can't write block: image file is read-only") + if blk_num >= self.num_blocks: + raise IOError( + "Invalid image file block num: got %d but max is %d" + % (blk_num, self.num_blocks) + ) + if len(data) != (self.block_bytes * num_blks): + raise IOError( + "Invalid block size written: got %d but size is %d" + % (len(data), self.block_bytes) + ) + off = blk_num * self.block_bytes + if off != self.fh.tell(): + self.fh.seek(off, os.SEEK_SET) + self.fh.write(data) - def write_blk(self, blk_num, data, num_blks=1): - if self.read_only: - raise IOError("Can't write block: image file is read-only") - if blk_num >= self.num_blocks: - raise IOError("Invalid image file block num: got %d but max is %d" % (blk_num, 
self.num_blocks)) - if len(data) != (self.block_bytes * num_blks): - raise IOError("Invalid block size written: got %d but size is %d" % (len(data), self.block_bytes)) - off = blk_num * self.block_bytes - if off != self.fh.tell(): - self.fh.seek(off, os.SEEK_SET) - self.fh.write(data) + def flush(self): + self.fh.flush() - def flush(self): - self.fh.flush() + def close(self): + if self.fh != None: + self.fh.close() + self.fh = None - def close(self): - if self.fh != None: - self.fh.close() - self.fh = None + def create(self, num_blocks): + if self.read_only: + raise IOError("Can't create image file in read only mode") + block = b"\0" * self.block_bytes + if self.fobj is not None: + for i in range(num_blocks): + self.fobj.write(block) + self.fobj.seek(0, 0) + else: + fh = open(self.file_name, "wb") + for i in range(num_blocks): + fh.write(block) + fh.close() - def create(self, num_blocks): - if self.read_only: - raise IOError("Can't create image file in read only mode") - block = b'\0' * self.block_bytes - if self.fobj is not None: - for i in range(num_blocks): - self.fobj.write(block) - self.fobj.seek(0,0) - else: - fh = open(self.file_name, "wb") - for i in range(num_blocks): - fh.write(block) - fh.close() # --- mini test --- -if __name__ == '__main__': - import sys - for a in sys.argv[1:]: - # read image - im = ImageFile(a) - im.open() - d = im.read_blk(0) - im.write_blk(0,d) - im.close() - # read fobj - fobj = open(a,"r+b") - im = ImageFile(a,fobj=fobj) - im.open() - d = im.read_blk(0) - im.write_blk(0,d) - im.close() +if __name__ == "__main__": + import sys + + for a in sys.argv[1:]: + # read image + im = ImageFile(a) + im.open() + d = im.read_blk(0) + im.write_blk(0, d) + im.close() + # read fobj + fobj = open(a, "r+b") + im = ImageFile(a, fobj=fobj) + im.open() + d = im.read_blk(0) + im.write_blk(0, d) + im.close() diff --git a/amitools/fs/blkdev/PartBlockDevice.py b/amitools/fs/blkdev/PartBlockDevice.py index e8410bd5..54570e8c 100644 --- 
a/amitools/fs/blkdev/PartBlockDevice.py +++ b/amitools/fs/blkdev/PartBlockDevice.py @@ -1,12 +1,9 @@ - - from .BlockDevice import BlockDevice import os.path import os class PartBlockDevice(BlockDevice): - def __init__(self, raw_blkdev, part_blk, auto_close=False): self.raw_blkdev = raw_blkdev self.part_blk = part_blk @@ -36,8 +33,7 @@ def open(self): boot_blocks = dos_env.boot_blocks if boot_blocks == 0: boot_blocks = 2 - self._set_geometry(cyls, heads, secs, block_bytes, - reserved, boot_blocks) + self._set_geometry(cyls, heads, secs, block_bytes, reserved, boot_blocks) return True def flush(self): @@ -50,19 +46,25 @@ def close(self): def read_block(self, blk_num): if blk_num >= self.num_blocks: - raise ValueError("Invalid Part block num: got %d but max is %d" % ( - blk_num, self.num_blocks)) + raise ValueError( + "Invalid Part block num: got %d but max is %d" + % (blk_num, self.num_blocks) + ) num_blks = self.sec_per_blk off = self.blk_off + (blk_num * num_blks) return self.raw_blkdev.read_block(off, num_blks=num_blks) def write_block(self, blk_num, data): if blk_num >= self.num_blocks: - raise ValueError("Invalid Part block num: got %d but max is %d" % ( - blk_num, self.num_blocks)) + raise ValueError( + "Invalid Part block num: got %d but max is %d" + % (blk_num, self.num_blocks) + ) if len(data) != self.block_bytes: - raise ValueError("Invalid Part block size written: got %d but size is %d" % ( - len(data), self.block_bytes)) + raise ValueError( + "Invalid Part block size written: got %d but size is %d" + % (len(data), self.block_bytes) + ) num_blks = self.sec_per_blk off = self.blk_off + (blk_num * num_blks) self.raw_blkdev.write_block(off, data, num_blks=num_blks) diff --git a/amitools/fs/blkdev/RawBlockDevice.py b/amitools/fs/blkdev/RawBlockDevice.py index 7e7cf60c..5e08eabe 100644 --- a/amitools/fs/blkdev/RawBlockDevice.py +++ b/amitools/fs/blkdev/RawBlockDevice.py @@ -1,36 +1,34 @@ - - - from .BlockDevice import BlockDevice import os.path import os from 
.ImageFile import ImageFile + class RawBlockDevice(BlockDevice): - def __init__(self, raw_file, read_only=False, block_bytes=512, fobj=None): - self.img_file = ImageFile(raw_file, read_only, block_bytes, fobj) + def __init__(self, raw_file, read_only=False, block_bytes=512, fobj=None): + self.img_file = ImageFile(raw_file, read_only, block_bytes, fobj) - def create(self, num_blocks): - self.img_file.create(num_blocks) - self.open() - self.num_blocks = num_blocks + def create(self, num_blocks): + self.img_file.create(num_blocks) + self.open() + self.num_blocks = num_blocks - def open(self): - self.img_file.open() - # calc block longs - self.block_bytes = self.img_file.block_bytes - self.block_longs = self.block_bytes // 4 - self.num_blocks = self.img_file.num_blocks + def open(self): + self.img_file.open() + # calc block longs + self.block_bytes = self.img_file.block_bytes + self.block_longs = self.block_bytes // 4 + self.num_blocks = self.img_file.num_blocks - def flush(self): - self.img_file.flush() + def flush(self): + self.img_file.flush() - def close(self): - self.img_file.close() + def close(self): + self.img_file.close() - def read_block(self, blk_num, num_blks=1): - return self.img_file.read_blk(blk_num, num_blks) + def read_block(self, blk_num, num_blks=1): + return self.img_file.read_blk(blk_num, num_blks) - def write_block(self, blk_num, data, num_blks=1): - self.img_file.write_blk(blk_num, data, num_blks) + def write_block(self, blk_num, data, num_blks=1): + self.img_file.write_blk(blk_num, data, num_blks) diff --git a/amitools/fs/block/BitmapBlock.py b/amitools/fs/block/BitmapBlock.py index 4029bf61..052cbcfc 100644 --- a/amitools/fs/block/BitmapBlock.py +++ b/amitools/fs/block/BitmapBlock.py @@ -1,35 +1,33 @@ +from .Block import Block +class BitmapBlock(Block): + def __init__(self, blkdev, blk_num): + Block.__init__(self, blkdev, blk_num, chk_loc=0) -from .Block import Block + def set(self, data): + self._set_data(data) + self._read() -class 
BitmapBlock(Block): - def __init__(self, blkdev, blk_num): - Block.__init__(self, blkdev, blk_num, chk_loc=0) - - def set(self, data): - self._set_data(data) - self._read() - - def create(self): - self._create_data() - - def read(self): - self._read_data() - self._read() - - def _read(self): - Block.read(self) - if not self.valid: - return False - self.valid = True - return True - - def get_bitmap_data(self): - return self.data[4:] - - def set_bitmap_data(self, data): - self.data[4:] = data - - def dump(self): - Block.dump(self,"Bitmap") + def create(self): + self._create_data() + + def read(self): + self._read_data() + self._read() + + def _read(self): + Block.read(self) + if not self.valid: + return False + self.valid = True + return True + + def get_bitmap_data(self): + return self.data[4:] + + def set_bitmap_data(self, data): + self.data[4:] = data + + def dump(self): + Block.dump(self, "Bitmap") diff --git a/amitools/fs/block/BitmapExtBlock.py b/amitools/fs/block/BitmapExtBlock.py index 1528508d..5b5d1092 100644 --- a/amitools/fs/block/BitmapExtBlock.py +++ b/amitools/fs/block/BitmapExtBlock.py @@ -1,50 +1,47 @@ - - - from .Block import Block from amitools.util.HexDump import * + class BitmapExtBlock(Block): - def __init__(self, blkdev, blk_num): - Block.__init__(self, blkdev, blk_num) - - def set(self, data): - self._set_data(data) - self._read() - - def read(self): - self._read_data() - self._read() - - def _read(self): - # read bitmap blk ptrs - self.bitmap_ptrs = [] - for i in range(self.blkdev.block_longs-1): - bm_blk = self._get_long(i) - self.bitmap_ptrs.append(bm_blk) - - self.bitmap_ext_blk = self._get_long(-1) - - self.valid = True - return True - - def create(self): - self.bitmap_ptrs = [] - for i in range(self.blkdev.block_longs-1): - self.bitmap_ptrs.append(0) - self.bitmap_ext_blk = 0 - self.valid = True - return True - - def write(self): - self._create_data() - for i in range(self.blkdev.block_longs-1): - self._put_long(i, self.bitmap_ptrs[i]) - 
self._put_long(-1, self.bitmap_ext_blk) - self._write_data() - - def dump(self): - Block.dump(self, "BitmapExtBlock", False) - print(" bmp ptrs: %s" % self.bitmap_ptrs) - print(" bmp ext: %d" % self.bitmap_ext_blk) - \ No newline at end of file + def __init__(self, blkdev, blk_num): + Block.__init__(self, blkdev, blk_num) + + def set(self, data): + self._set_data(data) + self._read() + + def read(self): + self._read_data() + self._read() + + def _read(self): + # read bitmap blk ptrs + self.bitmap_ptrs = [] + for i in range(self.blkdev.block_longs - 1): + bm_blk = self._get_long(i) + self.bitmap_ptrs.append(bm_blk) + + self.bitmap_ext_blk = self._get_long(-1) + + self.valid = True + return True + + def create(self): + self.bitmap_ptrs = [] + for i in range(self.blkdev.block_longs - 1): + self.bitmap_ptrs.append(0) + self.bitmap_ext_blk = 0 + self.valid = True + return True + + def write(self): + self._create_data() + for i in range(self.blkdev.block_longs - 1): + self._put_long(i, self.bitmap_ptrs[i]) + self._put_long(-1, self.bitmap_ext_blk) + self._write_data() + + def dump(self): + Block.dump(self, "BitmapExtBlock", False) + print(" bmp ptrs: %s" % self.bitmap_ptrs) + print(" bmp ext: %d" % self.bitmap_ext_blk) diff --git a/amitools/fs/block/Block.py b/amitools/fs/block/Block.py index 364ce388..e92a6c94 100644 --- a/amitools/fs/block/Block.py +++ b/amitools/fs/block/Block.py @@ -1,6 +1,3 @@ - - - import struct import ctypes from ..TimeStamp import TimeStamp @@ -8,240 +5,248 @@ class Block: - # mark end of block list - no_blk = 0xffffffff - - # special blocks - RDSK = 0x5244534b # Rigid Disk Block - BADB = 0x42414442 # Bad Blocks Block - PART = 0x50415254 # Partition Block - FSHD = 0x46534844 # FileSystem Header Block - LSEG = 0x4c534547 # LoadSeg Block - - # block types - T_SHORT = 2 - T_DATA = 8 - T_LIST = 16 - T_DIR_CACHE = 33 - T_COMMENT = 64 - # block sub types - ST_ROOT = 1 - ST_USERDIR = 2 - ST_FILE = -3 & 0xffffffff - - def __init__(self, blkdev, blk_num, 
is_type=0, is_sub_type=0, chk_loc=5): - self.valid = False - self.blkdev = blkdev - self.blk_num = blk_num - self.block_longs = blkdev.block_longs - self.type = 0 - self.sub_type = 0 - self.data = None - self.is_type = is_type - self.is_sub_type = is_sub_type - self.chk_loc = chk_loc - - def __str__(self): - return "%s:@%d" % (self.__class__.__name__, self.blk_num) - - def create(self): - self.type = self.is_type - self.sub_type = self.is_sub_type - - def is_root_block(self): - return self.type == Block.T_SHORT and self.sub_type == Block.ST_ROOT - - def is_user_dir_block(self): - return self.type == Block.T_SHORT and self.sub_type == Block.ST_USERDIR - - def is_file_header_block(self): - return self.type == Block.T_SHORT and self.sub_type == Block.ST_FILE - - def is_file_list_block(self): - return self.type == Block.T_LIST and self.sub_type == Block.ST_FILE - - def is_file_data_block(self): - return self.type == Block.T_DATA - - def is_comment_block(self): - return self.type == Block.T_COMMENT - - def read(self): - if self.data == None: - self._read_data() - self._get_types() - self._get_chksum() - self.valid = self.valid_types and self.valid_chksum - - def write(self): - if self.data == None: - self._create_data() - self._put_types() - self._put_chksum() - self._write_data() - - def _set_data(self, data): - self.data = data - - def _read_data(self): - data = self.blkdev.read_block(self.blk_num) - if len(data) != self.blkdev.block_bytes: - raise ValueError("Invalid Block Data: size=%d but expected %d" % (len(data), self.blkdev.block_bytes)) - self._create_data() - self.data[:] = data - - def _write_data(self): - if self.data != None: - self.blkdev.write_block(self.blk_num, self.data) - - def _free_data(self): - self.data = None - - def _create_data(self): - num_bytes = self.blkdev.block_bytes - self.data = ctypes.create_string_buffer(num_bytes) - - def _put_long(self, num, val): - if num < 0: - num = self.block_longs + num - 
struct.pack_into(">I",self.data,num*4,val) - - def _get_long(self, num): - if num < 0: - num = self.block_longs + num - return struct.unpack_from(">I",self.data,num*4)[0] - - def _put_slong(self, num, val): - if num < 0: - num = self.block_longs + num - struct.pack_into(">i",self.data,num*4,val) - - def _get_slong(self, num): - if num < 0: - num = self.block_longs + num - return struct.unpack_from(">i",self.data,num*4)[0] - - def _get_types(self): - self.type = self._get_long(0) - self.sub_type = self._get_long(-1) - self.valid_types = True - if self.is_type != 0: - if self.type != self.is_type: - self.valid_types = False - if self.is_sub_type != 0: - if self.sub_type != self.is_sub_type: - self.valid_types = False - - def _put_types(self): - if self.is_type != 0: - self._put_long(0, self.is_type) - if self.is_sub_type != 0: - self._put_long(-1, self.is_sub_type) - - def _get_chksum(self): - self.got_chksum = self._get_long(self.chk_loc) - self.calc_chksum = self._calc_chksum() - self.valid_chksum = self.got_chksum == self.calc_chksum - - def _put_chksum(self): - self.calc_chksum = self._calc_chksum() - self.got_chksum = self.calc_chksum - self.valid_chksum = True - self._put_long(self.chk_loc, self.calc_chksum) - - def _calc_chksum(self): - chksum = 0 - for i in range(self.block_longs): - if i != self.chk_loc: - chksum += self._get_long(i) - return (-chksum) & 0xffffffff - - def _get_timestamp(self, loc): - days = self._get_long(loc) - mins = self._get_long(loc+1) - ticks = self._get_long(loc+2) - return TimeStamp(days, mins, ticks) - - def _put_timestamp(self, loc, ts): - if ts == None: - ts = TimeStamp() - self._put_long(loc, ts.days) - self._put_long(loc+1, ts.mins) - self._put_long(loc+2, ts.ticks) - - def _get_bytes(self, loc, size): - if loc < 0: - loc = self.block_longs + loc - loc = loc * 4 - return self.data[loc:loc+size] - - def _put_bytes(self, loc, data): - if loc < 0: - loc = self.block_longs + loc - loc = loc * 4 - size = len(data) - 
self.data[loc:loc+size] = data - - def _get_bstr(self, loc, max_size): - if loc < 0: - loc = self.block_longs + loc - loc = loc * 4 - size = ord(self.data[loc]) - if size > max_size: - return None - if size == 0: - return FSString() - name = self.data[loc+1:loc+1+size] - return FSString(name) - - def _put_bstr(self, loc, max_size, fs_str): - if fs_str is None: - fs_str = FSString() - assert isinstance(fs_str, FSString) - bstr = fs_str.get_ami_str() - assert len(bstr) < 256 - n = len(bstr) - if n > max_size: - bstr = bstr[:max_size] - if loc < 0: - loc = self.block_longs + loc - loc = loc * 4 - self.data[loc] = len(bstr) - if len(bstr) > 0: - self.data[loc+1:loc+1+len(bstr)] = bstr - - def _get_cstr(self, loc, max_size): - n = 0 - s = b"" - loc = loc * 4 - while n < max_size: - c = self.data[loc+n] - if ord(c) == 0: - break - s += c - n += 1 - return FSString(s) - - def _put_cstr(self, loc, max_size, fs_str): - if fs_str is None: - fs_str = FSString() - assert isinstance(fs_str, FSString) - cstr = fs_str.get_ami_str() - n = min(max_size, len(cstr)) - loc = loc * 4 - if n > 0: - self.data[loc:loc+n] = cstr - - def _dump_ptr(self, ptr): - if ptr == self.no_blk: - return "none" - else: - return "%d" % ptr - - def dump(self, name, details=True): - print("%sBlock(%d):" % (name, self.blk_num)) - if details: - print(" types: %x/%x (valid: %x/%x)" % (self.type, self.sub_type, self.is_type, self.is_sub_type)) - print(" chksum: 0x%08x (got) 0x%08x (calc)" % (self.got_chksum, self.calc_chksum)) - print(" valid: %s" % self.valid) - \ No newline at end of file + # mark end of block list + no_blk = 0xFFFFFFFF + + # special blocks + RDSK = 0x5244534B # Rigid Disk Block + BADB = 0x42414442 # Bad Blocks Block + PART = 0x50415254 # Partition Block + FSHD = 0x46534844 # FileSystem Header Block + LSEG = 0x4C534547 # LoadSeg Block + + # block types + T_SHORT = 2 + T_DATA = 8 + T_LIST = 16 + T_DIR_CACHE = 33 + T_COMMENT = 64 + # block sub types + ST_ROOT = 1 + ST_USERDIR = 2 + ST_FILE = 
-3 & 0xFFFFFFFF + + def __init__(self, blkdev, blk_num, is_type=0, is_sub_type=0, chk_loc=5): + self.valid = False + self.blkdev = blkdev + self.blk_num = blk_num + self.block_longs = blkdev.block_longs + self.type = 0 + self.sub_type = 0 + self.data = None + self.is_type = is_type + self.is_sub_type = is_sub_type + self.chk_loc = chk_loc + + def __str__(self): + return "%s:@%d" % (self.__class__.__name__, self.blk_num) + + def create(self): + self.type = self.is_type + self.sub_type = self.is_sub_type + + def is_root_block(self): + return self.type == Block.T_SHORT and self.sub_type == Block.ST_ROOT + + def is_user_dir_block(self): + return self.type == Block.T_SHORT and self.sub_type == Block.ST_USERDIR + + def is_file_header_block(self): + return self.type == Block.T_SHORT and self.sub_type == Block.ST_FILE + + def is_file_list_block(self): + return self.type == Block.T_LIST and self.sub_type == Block.ST_FILE + + def is_file_data_block(self): + return self.type == Block.T_DATA + + def is_comment_block(self): + return self.type == Block.T_COMMENT + + def read(self): + if self.data == None: + self._read_data() + self._get_types() + self._get_chksum() + self.valid = self.valid_types and self.valid_chksum + + def write(self): + if self.data == None: + self._create_data() + self._put_types() + self._put_chksum() + self._write_data() + + def _set_data(self, data): + self.data = data + + def _read_data(self): + data = self.blkdev.read_block(self.blk_num) + if len(data) != self.blkdev.block_bytes: + raise ValueError( + "Invalid Block Data: size=%d but expected %d" + % (len(data), self.blkdev.block_bytes) + ) + self._create_data() + self.data[:] = data + + def _write_data(self): + if self.data != None: + self.blkdev.write_block(self.blk_num, self.data) + + def _free_data(self): + self.data = None + + def _create_data(self): + num_bytes = self.blkdev.block_bytes + self.data = ctypes.create_string_buffer(num_bytes) + + def _put_long(self, num, val): + if num < 0: + num = 
self.block_longs + num + struct.pack_into(">I", self.data, num * 4, val) + + def _get_long(self, num): + if num < 0: + num = self.block_longs + num + return struct.unpack_from(">I", self.data, num * 4)[0] + + def _put_slong(self, num, val): + if num < 0: + num = self.block_longs + num + struct.pack_into(">i", self.data, num * 4, val) + + def _get_slong(self, num): + if num < 0: + num = self.block_longs + num + return struct.unpack_from(">i", self.data, num * 4)[0] + + def _get_types(self): + self.type = self._get_long(0) + self.sub_type = self._get_long(-1) + self.valid_types = True + if self.is_type != 0: + if self.type != self.is_type: + self.valid_types = False + if self.is_sub_type != 0: + if self.sub_type != self.is_sub_type: + self.valid_types = False + + def _put_types(self): + if self.is_type != 0: + self._put_long(0, self.is_type) + if self.is_sub_type != 0: + self._put_long(-1, self.is_sub_type) + + def _get_chksum(self): + self.got_chksum = self._get_long(self.chk_loc) + self.calc_chksum = self._calc_chksum() + self.valid_chksum = self.got_chksum == self.calc_chksum + + def _put_chksum(self): + self.calc_chksum = self._calc_chksum() + self.got_chksum = self.calc_chksum + self.valid_chksum = True + self._put_long(self.chk_loc, self.calc_chksum) + + def _calc_chksum(self): + chksum = 0 + for i in range(self.block_longs): + if i != self.chk_loc: + chksum += self._get_long(i) + return (-chksum) & 0xFFFFFFFF + + def _get_timestamp(self, loc): + days = self._get_long(loc) + mins = self._get_long(loc + 1) + ticks = self._get_long(loc + 2) + return TimeStamp(days, mins, ticks) + + def _put_timestamp(self, loc, ts): + if ts == None: + ts = TimeStamp() + self._put_long(loc, ts.days) + self._put_long(loc + 1, ts.mins) + self._put_long(loc + 2, ts.ticks) + + def _get_bytes(self, loc, size): + if loc < 0: + loc = self.block_longs + loc + loc = loc * 4 + return self.data[loc : loc + size] + + def _put_bytes(self, loc, data): + if loc < 0: + loc = self.block_longs + 
loc + loc = loc * 4 + size = len(data) + self.data[loc : loc + size] = data + + def _get_bstr(self, loc, max_size): + if loc < 0: + loc = self.block_longs + loc + loc = loc * 4 + size = ord(self.data[loc]) + if size > max_size: + return None + if size == 0: + return FSString() + name = self.data[loc + 1 : loc + 1 + size] + return FSString(name) + + def _put_bstr(self, loc, max_size, fs_str): + if fs_str is None: + fs_str = FSString() + assert isinstance(fs_str, FSString) + bstr = fs_str.get_ami_str() + assert len(bstr) < 256 + n = len(bstr) + if n > max_size: + bstr = bstr[:max_size] + if loc < 0: + loc = self.block_longs + loc + loc = loc * 4 + self.data[loc] = len(bstr) + if len(bstr) > 0: + self.data[loc + 1 : loc + 1 + len(bstr)] = bstr + + def _get_cstr(self, loc, max_size): + n = 0 + s = b"" + loc = loc * 4 + while n < max_size: + c = self.data[loc + n] + if ord(c) == 0: + break + s += c + n += 1 + return FSString(s) + + def _put_cstr(self, loc, max_size, fs_str): + if fs_str is None: + fs_str = FSString() + assert isinstance(fs_str, FSString) + cstr = fs_str.get_ami_str() + n = min(max_size, len(cstr)) + loc = loc * 4 + if n > 0: + self.data[loc : loc + n] = cstr + + def _dump_ptr(self, ptr): + if ptr == self.no_blk: + return "none" + else: + return "%d" % ptr + + def dump(self, name, details=True): + print("%sBlock(%d):" % (name, self.blk_num)) + if details: + print( + " types: %x/%x (valid: %x/%x)" + % (self.type, self.sub_type, self.is_type, self.is_sub_type) + ) + print( + " chksum: 0x%08x (got) 0x%08x (calc)" + % (self.got_chksum, self.calc_chksum) + ) + print(" valid: %s" % self.valid) diff --git a/amitools/fs/block/BootBlock.py b/amitools/fs/block/BootBlock.py index 6c3735be..6bd32534 100644 --- a/amitools/fs/block/BootBlock.py +++ b/amitools/fs/block/BootBlock.py @@ -1,175 +1,178 @@ - - - import os.path from .Block import Block import amitools.fs.DosType as DosType -class BootBlock(Block): - def __init__(self, blkdev, blk_num=0): - 
Block.__init__(self, blkdev, blk_num) - self.dos_type = None - self.got_root_blk = None - self.got_chksum = 0 - self.calc_chksum = 0 - self.boot_code = None - self.num_extra = self.blkdev.bootblocks - 1 - self.max_boot_code = self.blkdev.bootblocks * self.blkdev.block_bytes - 12 - self.extra_blks = [] - - def create(self, dos_type=DosType.DOS0, root_blk=None, boot_code=None): - Block.create(self) - self._create_data() - self.dos_type = dos_type - self.valid_dos_type = True - # root blk - self.calc_root_blk = int(self.blkdev.num_blocks // 2) - if root_blk != None: - self.got_root_blk = root_blk - else: - self.got_root_blk = self.calc_root_blk - # create extra blks - self.extra_blks = [] - for i in range(self.num_extra): - b = Block(self.blkdev, self.blk_num + 1 + i) - b._create_data() - self.extra_blks.append(b) - # setup boot code - return self.set_boot_code(boot_code) - - def set_boot_code(self, boot_code): - if boot_code != None: - if len(boot_code) <= self.max_boot_code: - self.boot_code = boot_code - self.valid = True - else: - self.valid = False - else: - self.boot_code = None - self.valid = True - return self.valid - - def _calc_chksum(self): - all_blks = [self] + self.extra_blks - n = self.blkdev.block_longs - chksum = 0 - for blk in all_blks: - for i in range(n): - if i != 1: # skip chksum - chksum += blk._get_long(i) - if chksum > 0xffffffff: - chksum += 1 - chksum &= 0xffffffff - return (~chksum) & 0xffffffff - - def read(self): - self._read_data() - # read extra boot blocks - self.extra_blks = [] - for i in range(self.num_extra): - b = Block(self.blkdev, self.blk_num + 1 + i) - b._read_data() - self.extra_blks.append(b) - - self.dos_type = self._get_long(0) - self.got_chksum = self._get_long(1) - self.got_root_blk = self._get_long(2) - self.calc_chksum = self._calc_chksum() - # calc position of root block - self.calc_root_blk = int(self.blkdev.num_blocks // 2) - # check validity - self.valid_chksum = self.got_chksum == self.calc_chksum - 
self.valid_dos_type = DosType.is_valid(self.dos_type) - self.valid = self.valid_dos_type - - # look for boot_code - if self.valid: - self.read_boot_code() - return self.valid - - def read_boot_code(self): - boot_code = self.data[12:] - for blk in self.extra_blks: - boot_code += blk.data.raw - # remove nulls at end - pos = len(boot_code) - 4 - while pos > 0: - tag = boot_code[pos:pos+3] - if tag != 'DOS' and boot_code[pos] !=0: - pos += 4 - break - pos -= 4 - # something left - if pos > 0: - boot_code = boot_code[:pos] - self.boot_code = boot_code - - def write(self): - self._create_data() - self._put_long(0, self.dos_type) - self._put_long(2, self.got_root_blk) - - if self.boot_code != None: - self.write_boot_code() - self.calc_chksum = self._calc_chksum() - self._put_long(1, self.calc_chksum) - self.valid_chksum = True - else: - self.calc_chksum = 0 - self.valid_chksum = False - - self._write_data() - - def write_boot_code(self): - n = len(self.boot_code) - bb = self.blkdev.block_bytes - first_size = bb - 12 - boot_code = self.boot_code - # spans more blocks - if n > first_size: - extra = boot_code[first_size:] - boot_code = boot_code[:first_size] - # write extra blocks - pos = 0 - off = 0 - n -= first_size - while n > 0: - num = n - if num > bb: - num = bb - self.extra_blks[pos].data[:num] = extra[off:off+num] - self.extra_blks[pos]._write_data() - off += num - pos += 1 - n -= num - # use this for first block - n = first_size - - # embed boot code in boot block - self.data[12:12+n] = boot_code - - def dump(self): - print("BootBlock(%d):" % self.blk_num) - print(" dos_type: 0x%08x %s (valid: %s) is_ffs=%s is_intl=%s is_dircache=%s" \ - % (self.dos_type, DosType.num_to_tag_str(self.dos_type), - self.valid_dos_type, - DosType.is_ffs(self.dos_type), - DosType.is_intl(self.dos_type), - DosType.is_dircache(self.dos_type))) - print(" root_blk: %d (got %d)" % (self.calc_root_blk, self.got_root_blk)) - print(" chksum: 0x%08x (got) 0x%08x (calc) -> bootable: %s" % 
(self.got_chksum, self.calc_chksum, self.valid_chksum)) - print(" valid: %s" % self.valid) - if self.boot_code != None: - print(" boot_code: %d bytes" % len(self.boot_code)) - - def get_boot_code_dir(self): - my_dir = os.path.dirname(__file__) - bc_dir = os.path.join(my_dir, "bootcode") - if os.path.exists(bc_dir): - return bc_dir - else: - return None - - \ No newline at end of file +class BootBlock(Block): + def __init__(self, blkdev, blk_num=0): + Block.__init__(self, blkdev, blk_num) + self.dos_type = None + self.got_root_blk = None + self.got_chksum = 0 + self.calc_chksum = 0 + self.boot_code = None + self.num_extra = self.blkdev.bootblocks - 1 + self.max_boot_code = self.blkdev.bootblocks * self.blkdev.block_bytes - 12 + self.extra_blks = [] + + def create(self, dos_type=DosType.DOS0, root_blk=None, boot_code=None): + Block.create(self) + self._create_data() + self.dos_type = dos_type + self.valid_dos_type = True + # root blk + self.calc_root_blk = int(self.blkdev.num_blocks // 2) + if root_blk != None: + self.got_root_blk = root_blk + else: + self.got_root_blk = self.calc_root_blk + # create extra blks + self.extra_blks = [] + for i in range(self.num_extra): + b = Block(self.blkdev, self.blk_num + 1 + i) + b._create_data() + self.extra_blks.append(b) + # setup boot code + return self.set_boot_code(boot_code) + + def set_boot_code(self, boot_code): + if boot_code != None: + if len(boot_code) <= self.max_boot_code: + self.boot_code = boot_code + self.valid = True + else: + self.valid = False + else: + self.boot_code = None + self.valid = True + return self.valid + + def _calc_chksum(self): + all_blks = [self] + self.extra_blks + n = self.blkdev.block_longs + chksum = 0 + for blk in all_blks: + for i in range(n): + if i != 1: # skip chksum + chksum += blk._get_long(i) + if chksum > 0xFFFFFFFF: + chksum += 1 + chksum &= 0xFFFFFFFF + return (~chksum) & 0xFFFFFFFF + + def read(self): + self._read_data() + # read extra boot blocks + self.extra_blks = [] + for i in 
range(self.num_extra): + b = Block(self.blkdev, self.blk_num + 1 + i) + b._read_data() + self.extra_blks.append(b) + + self.dos_type = self._get_long(0) + self.got_chksum = self._get_long(1) + self.got_root_blk = self._get_long(2) + self.calc_chksum = self._calc_chksum() + # calc position of root block + self.calc_root_blk = int(self.blkdev.num_blocks // 2) + # check validity + self.valid_chksum = self.got_chksum == self.calc_chksum + self.valid_dos_type = DosType.is_valid(self.dos_type) + self.valid = self.valid_dos_type + + # look for boot_code + if self.valid: + self.read_boot_code() + return self.valid + + def read_boot_code(self): + boot_code = self.data[12:] + for blk in self.extra_blks: + boot_code += blk.data.raw + # remove nulls at end + pos = len(boot_code) - 4 + while pos > 0: + tag = boot_code[pos : pos + 3] + if tag != "DOS" and boot_code[pos] != 0: + pos += 4 + break + pos -= 4 + # something left + if pos > 0: + boot_code = boot_code[:pos] + self.boot_code = boot_code + + def write(self): + self._create_data() + self._put_long(0, self.dos_type) + self._put_long(2, self.got_root_blk) + + if self.boot_code != None: + self.write_boot_code() + self.calc_chksum = self._calc_chksum() + self._put_long(1, self.calc_chksum) + self.valid_chksum = True + else: + self.calc_chksum = 0 + self.valid_chksum = False + + self._write_data() + + def write_boot_code(self): + n = len(self.boot_code) + bb = self.blkdev.block_bytes + first_size = bb - 12 + boot_code = self.boot_code + # spans more blocks + if n > first_size: + extra = boot_code[first_size:] + boot_code = boot_code[:first_size] + # write extra blocks + pos = 0 + off = 0 + n -= first_size + while n > 0: + num = n + if num > bb: + num = bb + self.extra_blks[pos].data[:num] = extra[off : off + num] + self.extra_blks[pos]._write_data() + off += num + pos += 1 + n -= num + # use this for first block + n = first_size + + # embed boot code in boot block + self.data[12 : 12 + n] = boot_code + + def dump(self): + 
print("BootBlock(%d):" % self.blk_num) + print( + " dos_type: 0x%08x %s (valid: %s) is_ffs=%s is_intl=%s is_dircache=%s" + % ( + self.dos_type, + DosType.num_to_tag_str(self.dos_type), + self.valid_dos_type, + DosType.is_ffs(self.dos_type), + DosType.is_intl(self.dos_type), + DosType.is_dircache(self.dos_type), + ) + ) + print(" root_blk: %d (got %d)" % (self.calc_root_blk, self.got_root_blk)) + print( + " chksum: 0x%08x (got) 0x%08x (calc) -> bootable: %s" + % (self.got_chksum, self.calc_chksum, self.valid_chksum) + ) + print(" valid: %s" % self.valid) + if self.boot_code != None: + print(" boot_code: %d bytes" % len(self.boot_code)) + + def get_boot_code_dir(self): + my_dir = os.path.dirname(__file__) + bc_dir = os.path.join(my_dir, "bootcode") + if os.path.exists(bc_dir): + return bc_dir + else: + return None diff --git a/amitools/fs/block/CommentBlock.py b/amitools/fs/block/CommentBlock.py index fa33f4f0..32fd6ae4 100644 --- a/amitools/fs/block/CommentBlock.py +++ b/amitools/fs/block/CommentBlock.py @@ -1,50 +1,48 @@ - - - import time from .Block import Block + class CommentBlock(Block): - def __init__(self, blkdev, blk_num): - Block.__init__(self, blkdev, blk_num, is_type=Block.T_COMMENT) - self.comment = "" - - def create(self, header_key, comment=""): - Block.create(self) - self.own_key = self.blk_num - self.header_key = header_key - self.comment = comment - - def set(self, data): - self._set_data(data) - self._read() - - def read(self): - self._read_data() - self._read() - - def _read(self): - Block.read(self) - if not self.valid: - return False - - # Comment fields - self.own_key = self._get_long(1) - self.header_key = self._get_long(2) - self.checksum = self._get_long(5) - self.comment = self._get_bstr(6, 79) - self.valid = (self.own_key == self.blk_num) - return self.valid - - def write(self): - Block._create_data(self) - self._put_long(1, self.own_key) - self._put_long(2, self.header_key) - self._put_bstr(6, 79, self.comment) - Block.write(self) - - def 
dump(self): - Block.dump(self,"Comment") - print(" own_key: %d" % (self.own_key)) - print(" header_key: %d" % (self.header_key)) - print(" comment: '%s'" % self.comment) + def __init__(self, blkdev, blk_num): + Block.__init__(self, blkdev, blk_num, is_type=Block.T_COMMENT) + self.comment = "" + + def create(self, header_key, comment=""): + Block.create(self) + self.own_key = self.blk_num + self.header_key = header_key + self.comment = comment + + def set(self, data): + self._set_data(data) + self._read() + + def read(self): + self._read_data() + self._read() + + def _read(self): + Block.read(self) + if not self.valid: + return False + + # Comment fields + self.own_key = self._get_long(1) + self.header_key = self._get_long(2) + self.checksum = self._get_long(5) + self.comment = self._get_bstr(6, 79) + self.valid = self.own_key == self.blk_num + return self.valid + + def write(self): + Block._create_data(self) + self._put_long(1, self.own_key) + self._put_long(2, self.header_key) + self._put_bstr(6, 79, self.comment) + Block.write(self) + + def dump(self): + Block.dump(self, "Comment") + print(" own_key: %d" % (self.own_key)) + print(" header_key: %d" % (self.header_key)) + print(" comment: '%s'" % self.comment) diff --git a/amitools/fs/block/DirCacheBlock.py b/amitools/fs/block/DirCacheBlock.py index cd50be4f..1154433b 100644 --- a/amitools/fs/block/DirCacheBlock.py +++ b/amitools/fs/block/DirCacheBlock.py @@ -1,6 +1,3 @@ - - - import time import struct @@ -9,173 +6,189 @@ from ..TimeStamp import TimeStamp from ..FSString import FSString + class DirCacheRecord: - def __init__(self, entry=0, size=0, protect=0, mod_ts=None, sub_type=0, name='', comment=None): - self.entry = entry - self.size = size - self.protect = protect - self.mod_ts = mod_ts - self.sub_type = sub_type - self.name = name - if comment is None: - self.comment = FSString() - else: - self.comment = comment - self.offset = None - - def get_size(self): - total_len = 25 + len(self.name.get_ami_str()) + 
len(self.comment.get_ami_str()) - align_len = (total_len + 1) & ~1 - return align_len - - def get(self, data, off): - self.offset = off - # header - d = struct.unpack_from(">IIIHHHHH",data,offset=off) - self.entry = d[0] - self.size = d[1] - self.protect = d[2] - self.mod_ts = TimeStamp(d[5],d[6],d[7]) - self.type = ord(data[off + 22]) - # name - name_len = ord(data[off + 23]) - name_off = off + 24 - self.name = FSString(data[name_off : name_off + name_len]) - # comment - comment_len = ord(data[off + name_len + 24]) - comment_off = off + 25 + name_len - self.comment = FSString(data[comment_off : comment_off + comment_len]) - return off + self.get_size() - - def put(self, data, off): - self.offset = off - # header - ts = self.mod_ts - struct.pack_into(">IIIHHHHH",data,off,self.entry,self.size,self.protect,0,0,ts.days,ts.mins,ts.ticks) - # name - name = self.name.get_ami_str() - name_len = len(name) - data[off + 23] = name_len - name_off = off + 24 - data[name_off : name_off + name_len] = name - # comment - comment = self.comment.get_ami_str() - comment_len = len(comment) - data[off + 24 + name_len] = comment_len - comment_off = off + 25 + name_len - data[comment_off : comment_off + comment_len] = comment - return off + self.get_size() - - def dump(self): - print("DirCacheRecord(%s)(size=%d)" % (self.offset, self.get_size())) - print("\tentry: %s" % self.entry) - print("\tsize: %s" % self.size) - pf = ProtectFlags(self.protect) - print("\tprotect: 0x%x 0b%s %s" % (self.protect, pf.bin_str(), pf)) - print("\tmod_ts: %s" % self.mod_ts) - print("\tsub_type: 0x%x" % self.sub_type) - print("\tname: %s" % self.name) - print("\tcomment: %s" % self.comment) + def __init__( + self, entry=0, size=0, protect=0, mod_ts=None, sub_type=0, name="", comment=None + ): + self.entry = entry + self.size = size + self.protect = protect + self.mod_ts = mod_ts + self.sub_type = sub_type + self.name = name + if comment is None: + self.comment = FSString() + else: + self.comment = comment + 
self.offset = None + + def get_size(self): + total_len = 25 + len(self.name.get_ami_str()) + len(self.comment.get_ami_str()) + align_len = (total_len + 1) & ~1 + return align_len + + def get(self, data, off): + self.offset = off + # header + d = struct.unpack_from(">IIIHHHHH", data, offset=off) + self.entry = d[0] + self.size = d[1] + self.protect = d[2] + self.mod_ts = TimeStamp(d[5], d[6], d[7]) + self.type = ord(data[off + 22]) + # name + name_len = ord(data[off + 23]) + name_off = off + 24 + self.name = FSString(data[name_off : name_off + name_len]) + # comment + comment_len = ord(data[off + name_len + 24]) + comment_off = off + 25 + name_len + self.comment = FSString(data[comment_off : comment_off + comment_len]) + return off + self.get_size() + + def put(self, data, off): + self.offset = off + # header + ts = self.mod_ts + struct.pack_into( + ">IIIHHHHH", + data, + off, + self.entry, + self.size, + self.protect, + 0, + 0, + ts.days, + ts.mins, + ts.ticks, + ) + # name + name = self.name.get_ami_str() + name_len = len(name) + data[off + 23] = name_len + name_off = off + 24 + data[name_off : name_off + name_len] = name + # comment + comment = self.comment.get_ami_str() + comment_len = len(comment) + data[off + 24 + name_len] = comment_len + comment_off = off + 25 + name_len + data[comment_off : comment_off + comment_len] = comment + return off + self.get_size() + + def dump(self): + print("DirCacheRecord(%s)(size=%d)" % (self.offset, self.get_size())) + print("\tentry: %s" % self.entry) + print("\tsize: %s" % self.size) + pf = ProtectFlags(self.protect) + print("\tprotect: 0x%x 0b%s %s" % (self.protect, pf.bin_str(), pf)) + print("\tmod_ts: %s" % self.mod_ts) + print("\tsub_type: 0x%x" % self.sub_type) + print("\tname: %s" % self.name) + print("\tcomment: %s" % self.comment) + class DirCacheBlock(Block): - def __init__(self, blkdev, blk_num): - Block.__init__(self, blkdev, blk_num, is_type=Block.T_DIR_CACHE) - - def set(self, data): - self._set_data(data) - 
self._read() - - def read(self): - self._read_data() - self._read() - - def _read(self): - Block.read(self) - if not self.valid: - return False - - # fields - self.own_key = self._get_long(1) - self.parent = self._get_long(2) - self.num_records = self._get_long(3) - self.next_cache = self._get_long(4) - self.records = [] - - # get records - off = 24 - for i in range(self.num_records): - r = DirCacheRecord() - off = r.get(self.data, off) - if off == -1: - return False - self.records.append(r) - - self.valid = True - return True - - def get_total_record_size(self): - size = 0 - for r in self.records: - size += r.get_size() - return size - - def get_free_record_size(self): - return self.blkdev.block_bytes - 24 - self.get_total_record_size() - - def create(self, parent, records=None, next_cache=0): - Block.create(self) - self.own_key = self.blk_num - self.parent = parent - self.next_cache = next_cache - if records == None: - self.num_records = 0 - self.records = [] - else: - self.num_records = len(records) - self.records = records - self.valid = True - return True - - def add_record(self, record): - self.records.append(record) - self.num_records = len(self.records) - - def get_record_by_name(self, name): - for r in self.records: - if r.name == name: - return r - return None - - def remove_record(self, record): - self.records.remove(record) - self.num_records = len(self.records) - - def has_record(self, record): - return record in self.records - - def is_empty(self): - return self.num_records == 0 - - def write(self): - Block._create_data(self) - self._put_long(1, self.own_key) - self._put_long(2, self.parent) - self._put_long(3, self.num_records) - self._put_long(4, self.next_cache) - - # put records - off = 24 - for r in self.records: - off = r.put(self.data, off) - - Block.write(self) - - def dump(self): - Block.dump(self,"DirCache") - print(" own_key: %d" % (self.own_key)) - print(" parent: %d" % (self.parent)) - print(" num_records:%d" % (self.num_records)) - 
print(" next_cache: %d" % (self.next_cache)) - print(" num records:%d" % len(self.records)) - for r in self.records: - r.dump() + def __init__(self, blkdev, blk_num): + Block.__init__(self, blkdev, blk_num, is_type=Block.T_DIR_CACHE) + + def set(self, data): + self._set_data(data) + self._read() + + def read(self): + self._read_data() + self._read() + + def _read(self): + Block.read(self) + if not self.valid: + return False + + # fields + self.own_key = self._get_long(1) + self.parent = self._get_long(2) + self.num_records = self._get_long(3) + self.next_cache = self._get_long(4) + self.records = [] + + # get records + off = 24 + for i in range(self.num_records): + r = DirCacheRecord() + off = r.get(self.data, off) + if off == -1: + return False + self.records.append(r) + + self.valid = True + return True + + def get_total_record_size(self): + size = 0 + for r in self.records: + size += r.get_size() + return size + + def get_free_record_size(self): + return self.blkdev.block_bytes - 24 - self.get_total_record_size() + + def create(self, parent, records=None, next_cache=0): + Block.create(self) + self.own_key = self.blk_num + self.parent = parent + self.next_cache = next_cache + if records == None: + self.num_records = 0 + self.records = [] + else: + self.num_records = len(records) + self.records = records + self.valid = True + return True + + def add_record(self, record): + self.records.append(record) + self.num_records = len(self.records) + + def get_record_by_name(self, name): + for r in self.records: + if r.name == name: + return r + return None + + def remove_record(self, record): + self.records.remove(record) + self.num_records = len(self.records) + + def has_record(self, record): + return record in self.records + + def is_empty(self): + return self.num_records == 0 + + def write(self): + Block._create_data(self) + self._put_long(1, self.own_key) + self._put_long(2, self.parent) + self._put_long(3, self.num_records) + self._put_long(4, self.next_cache) + + # 
put records + off = 24 + for r in self.records: + off = r.put(self.data, off) + + Block.write(self) + + def dump(self): + Block.dump(self, "DirCache") + print(" own_key: %d" % (self.own_key)) + print(" parent: %d" % (self.parent)) + print(" num_records:%d" % (self.num_records)) + print(" next_cache: %d" % (self.next_cache)) + print(" num records:%d" % len(self.records)) + for r in self.records: + r.dump() diff --git a/amitools/fs/block/EntryBlock.py b/amitools/fs/block/EntryBlock.py index 7ce642d4..8d4e4d4a 100644 --- a/amitools/fs/block/EntryBlock.py +++ b/amitools/fs/block/EntryBlock.py @@ -1,63 +1,62 @@ - - - from .Block import Block from .CommentBlock import CommentBlock from ..FSString import FSString + class EntryBlock(Block): - """Base class for all block types that describe entries within a directory""" - def __init__(self, blkdev, blk_num, is_type, is_sub_type, is_longname=False): - Block.__init__(self, blkdev, blk_num, is_type, is_sub_type) - self.is_longname = is_longname - self.comment_block_id = 0 + """Base class for all block types that describe entries within a directory""" + + def __init__(self, blkdev, blk_num, is_type, is_sub_type, is_longname=False): + Block.__init__(self, blkdev, blk_num, is_type, is_sub_type) + self.is_longname = is_longname + self.comment_block_id = 0 + + def _read_nac_modts(self): + """Reads the name, comment, and modifcation timestamp""" + if self.is_longname: + # In long filename mode, we have a combined field that contains + # the filename and the comment as consequtive BSTR. 
If the comment does + # not fit in, it is stored in an extra block + nac = self._get_bytes(-46, 112) + name_len = nac[0] + self.name = FSString(nac[1 : name_len + 1]) + comment_len = nac[name_len + 1] + if comment_len > 0: + self.comment = FSString(nac[name_len + 2 : name_len + 2 + comment_len]) + else: + # Comment is located in an extra block + self.comment_block_id = self._get_long(-18) + self.comment = FSString() + self.mod_ts = self._get_timestamp(-15) + else: + self.comment = self._get_bstr(-46, 79) + self.name = self._get_bstr(-20, 30) + self.mod_ts = self._get_timestamp(-23) - def _read_nac_modts(self): - """Reads the name, comment, and modifcation timestamp""" - if self.is_longname: - # In long filename mode, we have a combined field that contains - # the filename and the comment as consequtive BSTR. If the comment does - # not fit in, it is stored in an extra block - nac = self._get_bytes(-46,112) - name_len = nac[0] - self.name = FSString(nac[1:name_len+1]) - comment_len = nac[name_len+1] - if comment_len > 0: - self.comment = FSString(nac[name_len+2:name_len+2+comment_len]) - else: - # Comment is located in an extra block - self.comment_block_id = self._get_long(-18) - self.comment = FSString() - self.mod_ts = self._get_timestamp(-15) - else: - self.comment = self._get_bstr(-46, 79) - self.name = self._get_bstr(-20, 30) - self.mod_ts = self._get_timestamp(-23) + def _write_nac_modts(self): + """Writes the name, comment, and modifcation timestamp""" + if self.is_longname: + nac = bytearray() + name = self.name.get_ami_str() + name_len = len(name) + nac.append(name_len) + nac += name + if self.comment_block_id != 0: + nac.append(0) + else: + comment = self.name.get_ami_str() + comment_len = len(comment) + nac.append(comment_len) + nac += comment + self._put_bytes(-46, nac) + self._put_long(-18, self.comment_block_id) + self._put_timestamp(-15, self.mod_ts) + else: + self._put_bstr(-46, 79, self.comment) + self._put_timestamp(-23, self.mod_ts) + 
self._put_bstr(-20, 30, self.name) - def _write_nac_modts(self): - """Writes the name, comment, and modifcation timestamp""" - if self.is_longname: - nac = bytearray() - name = self.name.get_ami_str() - name_len = len(name) - nac.append(name_len) - nac += name - if self.comment_block_id != 0: - nac.append(0) - else: - comment = self.name.get_ami_str() - comment_len = len(comment) - nac.append(comment_len) - nac += comment - self._put_bytes(-46, nac) - self._put_long(-18, self.comment_block_id) - self._put_timestamp(-15, self.mod_ts) - else: - self._put_bstr(-46, 79, self.comment) - self._put_timestamp(-23, self.mod_ts) - self._put_bstr(-20, 30, self.name) - - @staticmethod - def needs_extra_comment_block(name, comment): - """Returns whether the given name/comment pair requires an extra comment block""" - return len(name) + len(comment) > 110 + @staticmethod + def needs_extra_comment_block(name, comment): + """Returns whether the given name/comment pair requires an extra comment block""" + return len(name) + len(comment) > 110 diff --git a/amitools/fs/block/FileDataBlock.py b/amitools/fs/block/FileDataBlock.py index a5d7bf45..2e003233 100644 --- a/amitools/fs/block/FileDataBlock.py +++ b/amitools/fs/block/FileDataBlock.py @@ -1,59 +1,56 @@ +from .Block import Block +class FileDataBlock(Block): + def __init__(self, blkdev, blk_num): + Block.__init__(self, blkdev, blk_num, is_type=Block.T_DATA) -from .Block import Block + def set(self, data): + self._set_data(data) + self._read() -class FileDataBlock(Block): - def __init__(self, blkdev, blk_num): - Block.__init__(self, blkdev, blk_num, is_type=Block.T_DATA) - - def set(self, data): - self._set_data(data) - self._read() - - def read(self): - self._read_data() - self._read() - - def _read(self): - Block.read(self) - if not self.valid: - return False - - # FileData fields - self.hdr_key = self._get_long(1) - self.seq_num = self._get_long(2) - self.data_size = self._get_long(3) - self.next_data = self._get_long(4) - - 
self.valid = True - return self.valid - - def create(self, hdr_key, seq_num, data, next_data): - Block.create(self) - self.hdr_key = hdr_key - self.seq_num = seq_num - self.data_size = len(data) - self.next_data = next_data - self.contents = data - - def write(self): - Block._create_data(self) - self._put_long(1, self.hdr_key) - self._put_long(2, self.seq_num) - self._put_long(3, self.data_size) - self._put_long(4, self.next_data) - if self.contents != None: - self.data[24:24+self.data_size] = self.contents - Block.write(self) - - def get_block_data(self): - return self.data[24:24+self.data_size] - - def dump(self): - Block.dump(self,"FileData") - print(" hdr_key: %d" % self.hdr_key) - print(" seq_num: %d" % self.seq_num) - print(" data size: %d" % self.data_size) - print(" next_data: %d" % self.next_data) - + def read(self): + self._read_data() + self._read() + + def _read(self): + Block.read(self) + if not self.valid: + return False + + # FileData fields + self.hdr_key = self._get_long(1) + self.seq_num = self._get_long(2) + self.data_size = self._get_long(3) + self.next_data = self._get_long(4) + + self.valid = True + return self.valid + + def create(self, hdr_key, seq_num, data, next_data): + Block.create(self) + self.hdr_key = hdr_key + self.seq_num = seq_num + self.data_size = len(data) + self.next_data = next_data + self.contents = data + + def write(self): + Block._create_data(self) + self._put_long(1, self.hdr_key) + self._put_long(2, self.seq_num) + self._put_long(3, self.data_size) + self._put_long(4, self.next_data) + if self.contents != None: + self.data[24 : 24 + self.data_size] = self.contents + Block.write(self) + + def get_block_data(self): + return self.data[24 : 24 + self.data_size] + + def dump(self): + Block.dump(self, "FileData") + print(" hdr_key: %d" % self.hdr_key) + print(" seq_num: %d" % self.seq_num) + print(" data size: %d" % self.data_size) + print(" next_data: %d" % self.next_data) diff --git a/amitools/fs/block/FileHeaderBlock.py 
b/amitools/fs/block/FileHeaderBlock.py index 519308f8..a422f66d 100644 --- a/amitools/fs/block/FileHeaderBlock.py +++ b/amitools/fs/block/FileHeaderBlock.py @@ -1,6 +1,3 @@ - - - import time from .Block import Block from .EntryBlock import EntryBlock @@ -9,110 +6,128 @@ from ..TimeStamp import * from ..FSString import FSString + class FileHeaderBlock(EntryBlock): - def __init__(self, blkdev, blk_num, is_longname): - EntryBlock.__init__(self, blkdev, blk_num, is_type=Block.T_SHORT, is_sub_type=Block.ST_FILE, is_longname=is_longname) - - def set(self, data): - self._set_data(data) - self._read() - - def read(self): - self._read_data() - self._read() - - def _read(self): - Block.read(self) - if not self.valid: - return False - - # FileHeader fields - self.own_key = self._get_long(1) - self.block_count = self._get_long(2) - self.first_data = self._get_long(4) - - # read (limited) data blocks table - bc = self.block_count - mbc = self.blkdev.block_longs - 56 - if bc > mbc: - bc = mbc - self.data_blocks = [] - for i in range(bc): - self.data_blocks.append(self._get_long(-51-i)) - - self.protect = self._get_long(-48) - self.protect_flags = ProtectFlags(self.protect) - self.byte_size = self._get_long(-47) - self._read_nac_modts() - self.hash_chain = self._get_long(-4) - self.parent = self._get_long(-3) - self.extension = self._get_long(-2) - - self.valid = (self.own_key == self.blk_num) - return self.valid - - def write(self): - Block._create_data(self) - self._put_long(1, self.own_key) - self._put_long(2, self.block_count) - self._put_long(4, self.first_data) - - # data blocks - for i in range(len(self.data_blocks)): - self._put_long(-51-i, self.data_blocks[i]) - - self._put_long(-48, self.protect) - self._put_long(-47, self.byte_size) - - self._write_nac_modts() - - self._put_long(-4, self.hash_chain) - self._put_long(-3, self.parent) - self._put_long(-2, self.extension) - Block.write(self) - - def create(self, parent, name, data_blocks, extension, byte_size=0, 
protect=0, comment=None, mod_ts=None, hash_chain=0): - Block.create(self) - self.own_key = self.blk_num - n = len(data_blocks) - self.block_count = n - if n == 0: - self.first_data = 0 - else: - self.first_data = data_blocks[0] - - self.data_blocks = data_blocks - self.protect = protect - self.protect_flags = ProtectFlags(self.protect) - self.byte_size = byte_size - if comment is None: - self.comment = FSString() - else: - assert isinstance(comment, FSString) - self.comment = comment - self.mod_ts = mod_ts - assert isinstance(name, FSString) - self.name = name - self.hash_chain = hash_chain - self.parent = parent - self.extension = extension - self.valid = True - return True - - def dump(self): - Block.dump(self,"FileHeader") - print(" own_key: %d" % self.own_key) - print(" blk_cnt: %d" % self.block_count) - print(" first_data: %d" % self.first_data) - if self.data_blocks != None: - print(" data blks: %s #%d" % (self.data_blocks, len(self.data_blocks))) - pf = ProtectFlags(self.protect) - print(" protect: 0x%x 0b%s %s" % (self.protect, pf.bin_str(), pf)) - print(" byte_size: %d" % self.byte_size) - print(" comment: '%s'" % self.comment) - print(" mod_ts: %s" % self.mod_ts) - print(" name: '%s'" % self.name) - print(" hash_chain: %d" % self.hash_chain) - print(" parent: %d" % self.parent) - print(" extension: %d" % self.extension) - \ No newline at end of file + def __init__(self, blkdev, blk_num, is_longname): + EntryBlock.__init__( + self, + blkdev, + blk_num, + is_type=Block.T_SHORT, + is_sub_type=Block.ST_FILE, + is_longname=is_longname, + ) + + def set(self, data): + self._set_data(data) + self._read() + + def read(self): + self._read_data() + self._read() + + def _read(self): + Block.read(self) + if not self.valid: + return False + + # FileHeader fields + self.own_key = self._get_long(1) + self.block_count = self._get_long(2) + self.first_data = self._get_long(4) + + # read (limited) data blocks table + bc = self.block_count + mbc = self.blkdev.block_longs - 
56 + if bc > mbc: + bc = mbc + self.data_blocks = [] + for i in range(bc): + self.data_blocks.append(self._get_long(-51 - i)) + + self.protect = self._get_long(-48) + self.protect_flags = ProtectFlags(self.protect) + self.byte_size = self._get_long(-47) + self._read_nac_modts() + self.hash_chain = self._get_long(-4) + self.parent = self._get_long(-3) + self.extension = self._get_long(-2) + + self.valid = self.own_key == self.blk_num + return self.valid + + def write(self): + Block._create_data(self) + self._put_long(1, self.own_key) + self._put_long(2, self.block_count) + self._put_long(4, self.first_data) + + # data blocks + for i in range(len(self.data_blocks)): + self._put_long(-51 - i, self.data_blocks[i]) + + self._put_long(-48, self.protect) + self._put_long(-47, self.byte_size) + + self._write_nac_modts() + + self._put_long(-4, self.hash_chain) + self._put_long(-3, self.parent) + self._put_long(-2, self.extension) + Block.write(self) + + def create( + self, + parent, + name, + data_blocks, + extension, + byte_size=0, + protect=0, + comment=None, + mod_ts=None, + hash_chain=0, + ): + Block.create(self) + self.own_key = self.blk_num + n = len(data_blocks) + self.block_count = n + if n == 0: + self.first_data = 0 + else: + self.first_data = data_blocks[0] + + self.data_blocks = data_blocks + self.protect = protect + self.protect_flags = ProtectFlags(self.protect) + self.byte_size = byte_size + if comment is None: + self.comment = FSString() + else: + assert isinstance(comment, FSString) + self.comment = comment + self.mod_ts = mod_ts + assert isinstance(name, FSString) + self.name = name + self.hash_chain = hash_chain + self.parent = parent + self.extension = extension + self.valid = True + return True + + def dump(self): + Block.dump(self, "FileHeader") + print(" own_key: %d" % self.own_key) + print(" blk_cnt: %d" % self.block_count) + print(" first_data: %d" % self.first_data) + if self.data_blocks != None: + print(" data blks: %s #%d" % (self.data_blocks, 
len(self.data_blocks))) + pf = ProtectFlags(self.protect) + print(" protect: 0x%x 0b%s %s" % (self.protect, pf.bin_str(), pf)) + print(" byte_size: %d" % self.byte_size) + print(" comment: '%s'" % self.comment) + print(" mod_ts: %s" % self.mod_ts) + print(" name: '%s'" % self.name) + print(" hash_chain: %d" % self.hash_chain) + print(" parent: %d" % self.parent) + print(" extension: %d" % self.extension) diff --git a/amitools/fs/block/FileListBlock.py b/amitools/fs/block/FileListBlock.py index 0a5efe64..a2d2e2a3 100644 --- a/amitools/fs/block/FileListBlock.py +++ b/amitools/fs/block/FileListBlock.py @@ -1,72 +1,71 @@ +from .Block import Block +class FileListBlock(Block): + def __init__(self, blkdev, blk_num): + Block.__init__( + self, blkdev, blk_num, is_type=Block.T_LIST, is_sub_type=Block.ST_FILE + ) -from .Block import Block + def set(self, data): + self._set_data(data) + self._read() -class FileListBlock(Block): - def __init__(self, blkdev, blk_num): - Block.__init__(self, blkdev, blk_num, is_type=Block.T_LIST, is_sub_type=Block.ST_FILE) - - def set(self, data): - self._set_data(data) - self._read() - - def read(self): - self._read_data() - self._read() - - def _read(self): - Block.read(self) - if not self.valid: - return False - - # FileList fields - self.own_key = self._get_long(1) - self.block_count = self._get_long(2) - - # read (limited) data blocks - bc = self.block_count - mbc = self.blkdev.block_longs - 56 - if bc > mbc: - bc = mbc - self.data_blocks = [] - for i in range(bc): - self.data_blocks.append(self._get_long(-51-i)) - - self.parent = self._get_long(-3) - self.extension = self._get_long(-2) + def read(self): + self._read_data() + self._read() + + def _read(self): + Block.read(self) + if not self.valid: + return False + + # FileList fields + self.own_key = self._get_long(1) + self.block_count = self._get_long(2) + + # read (limited) data blocks + bc = self.block_count + mbc = self.blkdev.block_longs - 56 + if bc > mbc: + bc = mbc + 
self.data_blocks = [] + for i in range(bc): + self.data_blocks.append(self._get_long(-51 - i)) + + self.parent = self._get_long(-3) + self.extension = self._get_long(-2) + + self.valid = self.own_key == self.blk_num + return self.valid + + def create(self, parent, data_blocks, extension): + Block.create(self) + self.own_key = self.blk_num + self.block_count = len(data_blocks) + self.data_blocks = data_blocks + self.parent = parent + self.extension = extension + self.valid = True + return True + + def write(self): + Block._create_data(self) + self._put_long(1, self.own_key) + self._put_long(2, self.block_count) + + # data blocks + for i in range(len(self.data_blocks)): + self._put_long(-51 - i, self.data_blocks[i]) + + self._put_long(-3, self.parent) + self._put_long(-2, self.extension) + Block.write(self) - self.valid = (self.own_key == self.blk_num) - return self.valid - - def create(self, parent, data_blocks, extension): - Block.create(self) - self.own_key = self.blk_num - self.block_count = len(data_blocks) - self.data_blocks = data_blocks - self.parent = parent - self.extension = extension - self.valid = True - return True - - def write(self): - Block._create_data(self) - self._put_long(1, self.own_key) - self._put_long(2, self.block_count) - - # data blocks - for i in range(len(self.data_blocks)): - self._put_long(-51-i, self.data_blocks[i]) - - self._put_long(-3, self.parent) - self._put_long(-2, self.extension) - Block.write(self) - - def dump(self): - Block.dump(self,"FileList") - print(" own_key: %d" % self.own_key) - print(" blk_cnt: %d" % self.block_count) - print(" data blks: %s" % self.data_blocks) - print(" parent: %d" % self.parent) - print(" extension: %d" % self.extension) - \ No newline at end of file + def dump(self): + Block.dump(self, "FileList") + print(" own_key: %d" % self.own_key) + print(" blk_cnt: %d" % self.block_count) + print(" data blks: %s" % self.data_blocks) + print(" parent: %d" % self.parent) + print(" extension: %d" % 
self.extension) diff --git a/amitools/fs/block/RootBlock.py b/amitools/fs/block/RootBlock.py index bc3907bc..814f9a26 100644 --- a/amitools/fs/block/RootBlock.py +++ b/amitools/fs/block/RootBlock.py @@ -1,6 +1,3 @@ - - - import time from .Block import Block @@ -8,130 +5,132 @@ class RootBlock(Block): - def __init__(self, blkdev, blk_num): - Block.__init__(self, blkdev, blk_num, is_type=Block.T_SHORT, is_sub_type=Block.ST_ROOT) - - def create(self, name, create_ts=None, disk_ts=None, mod_ts=None, extension=0, fstype=0): - Block.create(self) - # init fresh hash table - self.hash_size = self.blkdev.block_longs - 56 - self.hash_table = [] - for i in range(self.hash_size): - self.hash_table.append(0) - - # timestamps - self.mod_ts = mod_ts - self.disk_ts = disk_ts - self.create_ts = create_ts - - # name - self.name = name - - # bitmap: blank - self.bitmap_flag = 0xffffffff - self.bitmap_ptrs = [] - for i in range(25): - self.bitmap_ptrs.append(0) - self.bitmap_ext_blk = 0 - - # new stuff for DOS6 and DOS7 - self.fstype = fstype - self.blocks_used = 0 - - self.extension = extension - - def write(self): - self._create_data() - - # hash table - self._put_long(3, self.hash_size) - for i in range(self.hash_size): - self._put_long(6+i, self.hash_table[i]) - - # bitmap - self._put_long(-50, self.bitmap_flag) - for i in range(25): - self._put_long(-49+i, self.bitmap_ptrs[i]) - self._put_long(-24, self.bitmap_ext_blk) - - # timestamps - self._put_timestamp(-23, self.mod_ts) - self._put_timestamp(-10, self.disk_ts) - self._put_timestamp(-7, self.create_ts) - - # name - self._put_bstr(-20, 30, self.name) - self._put_long(-2, self.extension) - - # DOS6 and DOS7 stuff - self._put_long(-11, self.blocks_used) - self._put_long(-4, self.fstype) - - Block.write(self) - - def set(self, data): - self._set_data(data) - self._read() - - def read(self): - self._read_data() - self._read() - - def _read(self): - Block.read(self) - if not self.valid: - return False - - # name hash (limit to max 
size) - self.hash_size = self._get_long(3) - - # read (limited) hash - hs = self.hash_size - mhs = self.blkdev.block_longs - 56 - if hs > mhs: - hs = mhs - self.hash_table = [] - for i in range(hs): - self.hash_table.append(self._get_long(6+i)) - - # bitmap - self.bitmap_flag = self._get_long(-50) - self.bitmap_ptrs = [] - for i in range(25): - bm_blk = self._get_long(-49+i) - self.bitmap_ptrs.append(bm_blk) - self.bitmap_ext_blk = self._get_long(-24) - - # timestamps - self.mod_ts = self._get_timestamp(-23) - self.disk_ts = self._get_timestamp(-10) - self.create_ts = self._get_timestamp(-7) - - # name - self.name = self._get_bstr(-20, 30) - self.extension = self._get_long(-2) - - # Number of used blocks (new in DOS6 and DOS7) - self.blocks_used = self._get_long(-11) - # filesystem type (new in DOS6 and DOS7, 0 in others) - self.fstype = self._get_long(-4) - - # check validity - self.valid = True - #self.valid = (self.bitmap_flag == 0xffffffff) - return self.valid - - def dump(self): - Block.dump(self, "Root") - print(" hash size: %d" % self.hash_size) - print(" hash table:%s" % self.hash_table) - print(" bmp flag: 0x%08x" % self.bitmap_flag) - print(" bmp ptrs: %s" % self.bitmap_ptrs) - print(" bmp ext: %d" % self.bitmap_ext_blk) - print(" mod_ts: %s" % self.mod_ts) - print(" disk_ts: %s" % self.disk_ts) - print(" create_ts: %s" % self.create_ts) - print(" disk name: %s" % self.name) - print(" extension: %s" % self.extension) - - \ No newline at end of file + def __init__(self, blkdev, blk_num): + Block.__init__( + self, blkdev, blk_num, is_type=Block.T_SHORT, is_sub_type=Block.ST_ROOT + ) + + def create( + self, name, create_ts=None, disk_ts=None, mod_ts=None, extension=0, fstype=0 + ): + Block.create(self) + # init fresh hash table + self.hash_size = self.blkdev.block_longs - 56 + self.hash_table = [] + for i in range(self.hash_size): + self.hash_table.append(0) + + # timestamps + self.mod_ts = mod_ts + self.disk_ts = disk_ts + self.create_ts = create_ts + + # 
name + self.name = name + + # bitmap: blank + self.bitmap_flag = 0xFFFFFFFF + self.bitmap_ptrs = [] + for i in range(25): + self.bitmap_ptrs.append(0) + self.bitmap_ext_blk = 0 + + # new stuff for DOS6 and DOS7 + self.fstype = fstype + self.blocks_used = 0 + + self.extension = extension + + def write(self): + self._create_data() + + # hash table + self._put_long(3, self.hash_size) + for i in range(self.hash_size): + self._put_long(6 + i, self.hash_table[i]) + + # bitmap + self._put_long(-50, self.bitmap_flag) + for i in range(25): + self._put_long(-49 + i, self.bitmap_ptrs[i]) + self._put_long(-24, self.bitmap_ext_blk) + + # timestamps + self._put_timestamp(-23, self.mod_ts) + self._put_timestamp(-10, self.disk_ts) + self._put_timestamp(-7, self.create_ts) + + # name + self._put_bstr(-20, 30, self.name) + self._put_long(-2, self.extension) + + # DOS6 and DOS7 stuff + self._put_long(-11, self.blocks_used) + self._put_long(-4, self.fstype) + + Block.write(self) + + def set(self, data): + self._set_data(data) + self._read() + + def read(self): + self._read_data() + self._read() + + def _read(self): + Block.read(self) + if not self.valid: + return False + + # name hash (limit to max size) + self.hash_size = self._get_long(3) + + # read (limited) hash + hs = self.hash_size + mhs = self.blkdev.block_longs - 56 + if hs > mhs: + hs = mhs + self.hash_table = [] + for i in range(hs): + self.hash_table.append(self._get_long(6 + i)) + + # bitmap + self.bitmap_flag = self._get_long(-50) + self.bitmap_ptrs = [] + for i in range(25): + bm_blk = self._get_long(-49 + i) + self.bitmap_ptrs.append(bm_blk) + self.bitmap_ext_blk = self._get_long(-24) + + # timestamps + self.mod_ts = self._get_timestamp(-23) + self.disk_ts = self._get_timestamp(-10) + self.create_ts = self._get_timestamp(-7) + + # name + self.name = self._get_bstr(-20, 30) + self.extension = self._get_long(-2) + + # Number of used blocks (new in DOS6 and DOS7) + self.blocks_used = self._get_long(-11) + # filesystem type 
(new in DOS6 and DOS7, 0 in others) + self.fstype = self._get_long(-4) + + # check validity + self.valid = True + # self.valid = (self.bitmap_flag == 0xffffffff) + return self.valid + + def dump(self): + Block.dump(self, "Root") + print(" hash size: %d" % self.hash_size) + print(" hash table:%s" % self.hash_table) + print(" bmp flag: 0x%08x" % self.bitmap_flag) + print(" bmp ptrs: %s" % self.bitmap_ptrs) + print(" bmp ext: %d" % self.bitmap_ext_blk) + print(" mod_ts: %s" % self.mod_ts) + print(" disk_ts: %s" % self.disk_ts) + print(" create_ts: %s" % self.create_ts) + print(" disk name: %s" % self.name) + print(" extension: %s" % self.extension) diff --git a/amitools/fs/block/UserDirBlock.py b/amitools/fs/block/UserDirBlock.py index 53b97ee0..9ac6cf28 100644 --- a/amitools/fs/block/UserDirBlock.py +++ b/amitools/fs/block/UserDirBlock.py @@ -1,90 +1,103 @@ - - - import time from .Block import Block from .EntryBlock import EntryBlock from ..ProtectFlags import ProtectFlags from ..FSString import FSString + class UserDirBlock(EntryBlock): - def __init__(self, blkdev, blk_num, is_longname): - EntryBlock.__init__(self, blkdev, blk_num, is_type=Block.T_SHORT, is_sub_type=Block.ST_USERDIR,is_longname=is_longname) - - def set(self, data): - self._set_data(data) - self._read() - - def read(self): - self._read_data() - self._read() - - def _read(self): - Block.read(self) - if not self.valid: - return False + def __init__(self, blkdev, blk_num, is_longname): + EntryBlock.__init__( + self, + blkdev, + blk_num, + is_type=Block.T_SHORT, + is_sub_type=Block.ST_USERDIR, + is_longname=is_longname, + ) + + def set(self, data): + self._set_data(data) + self._read() + + def read(self): + self._read_data() + self._read() + + def _read(self): + Block.read(self) + if not self.valid: + return False + + # UserDir fields + self.own_key = self._get_long(1) + self.protect = self._get_long(-48) + self._read_nac_modts() + self.hash_chain = self._get_long(-4) + self.parent = self._get_long(-3) + 
self.extension = self._get_long(-2) + + # hash table of entries + self.hash_table = [] + self.hash_size = self.blkdev.block_longs - 56 + for i in range(self.hash_size): + self.hash_table.append(self._get_long(6 + i)) - # UserDir fields - self.own_key = self._get_long(1) - self.protect = self._get_long(-48) - self._read_nac_modts() - self.hash_chain = self._get_long(-4) - self.parent = self._get_long(-3) - self.extension = self._get_long(-2) + self.valid = self.own_key == self.blk_num + return self.valid - # hash table of entries - self.hash_table = [] - self.hash_size = self.blkdev.block_longs - 56 - for i in range(self.hash_size): - self.hash_table.append(self._get_long(6+i)) - - self.valid = (self.own_key == self.blk_num) - return self.valid + def create( + self, + parent, + name, + protect=0, + comment=None, + mod_ts=None, + hash_chain=0, + extension=0, + ): + Block.create(self) + self.own_key = self.blk_num + self.protect = protect + if comment is None: + self.comment = FSString() + else: + self.comment = comment + # timestamps + self.mod_ts = mod_ts + self.name = name + self.hash_chain = hash_chain + self.parent = parent + self.extension = extension + # empty hash table + self.hash_table = [] + self.hash_size = self.blkdev.block_longs - 56 + for i in range(self.hash_size): + self.hash_table.append(0) + self.valid = True + return True - def create(self, parent, name, protect=0, comment=None, mod_ts=None, hash_chain=0, extension=0): - Block.create(self) - self.own_key = self.blk_num - self.protect = protect - if comment is None: - self.comment = FSString() - else: - self.comment = comment - # timestamps - self.mod_ts = mod_ts - self.name = name - self.hash_chain = hash_chain - self.parent = parent - self.extension = extension - # empty hash table - self.hash_table = [] - self.hash_size = self.blkdev.block_longs - 56 - for i in range(self.hash_size): - self.hash_table.append(0) - self.valid = True - return True - - def write(self): - Block._create_data(self) - 
self._put_long(1, self.own_key) - self._put_long(-48, self.protect) - self._write_nac_modts() - self._put_long(-4, self.hash_chain) - self._put_long(-3, self.parent) - self._put_long(-2, self.extension) - # hash table - for i in range(self.hash_size): - self._put_long(6+i, self.hash_table[i]) - Block.write(self) - - def dump(self): - Block.dump(self,"UserDir") - print(" own_key: %d" % (self.own_key)) - pf = ProtectFlags(self.protect) - print(" protect: 0x%x 0b%s %s" % (self.protect, pf.bin_str(), pf)) - print(" comment: '%s'" % self.comment) - print(" mod_ts: %s" % self.mod_ts) - print(" name: '%s'" % self.name) - print(" hash_chain: %d" % self.hash_chain) - print(" parent: %d" % self.parent) - print(" extension: %s" % self.extension) + def write(self): + Block._create_data(self) + self._put_long(1, self.own_key) + self._put_long(-48, self.protect) + self._write_nac_modts() + self._put_long(-4, self.hash_chain) + self._put_long(-3, self.parent) + self._put_long(-2, self.extension) + # hash table + for i in range(self.hash_size): + self._put_long(6 + i, self.hash_table[i]) + Block.write(self) + def dump(self): + Block.dump(self, "UserDir") + print(" own_key: %d" % (self.own_key)) + pf = ProtectFlags(self.protect) + print(" protect: 0x%x 0b%s %s" % (self.protect, pf.bin_str(), pf)) + print(" comment: '%s'" % self.comment) + print(" mod_ts: %s" % self.mod_ts) + print(" name: '%s'" % self.name) + print(" hash_chain: %d" % self.hash_chain) + print(" parent: %d" % self.parent) + print(" extension: %s" % self.extension) diff --git a/amitools/fs/block/rdb/BadBlocksBlock.py b/amitools/fs/block/rdb/BadBlocksBlock.py index ec9d0951..12930acf 100644 --- a/amitools/fs/block/rdb/BadBlocksBlock.py +++ b/amitools/fs/block/rdb/BadBlocksBlock.py @@ -1,61 +1,59 @@ +from ..Block import Block +class BadBlockBlock(Block): + def __init__(self, blkdev, blk_num=0): + Block.__init__(self, blkdev, blk_num, chk_loc=2, is_type=Block.BADB) -from ..Block import Block + def create(self, 
block_pairs, host_id, size=128, next=0): + Block.create(self) + self.size = size + self.host_id = host_id + self.next = next + self.block_pairs = block_pairs -class BadBlockBlock(Block): - def __init__(self, blkdev, blk_num=0): - Block.__init__(self, blkdev, blk_num, chk_loc=2, is_type=Block.BADB) - - def create(self, block_pairs, host_id, size=128, next=0): - Block.create(self) - self.size = size - self.host_id = host_id - self.next = next - self.block_pairs = block_pairs - - def write(self): - self._create_data() - - self._put_long(1, self.size) - self._put_long(3, self.host_id) - self._put_long(4, self.next) - - # block_pairs: bad, good, bad, good ... - off = 6 - for b in self.block_pairs: - self._put_long(off, b) - off += 1 - - Block.write(self) - - def read(self): - Block.read(self) - if not self.valid: - return False - - self.size = self._get_long(1) - self.host_id = self._get_long(3) - self.next = self._get_long(4) - - self.block_pairs = [] - off = 6 - while off < self.block_longs: - b = self._get_long(off) - if b == 0 or b == 0xffffffff: - break - self.block_pairs.append(b) - - return self.valid - - def dump(self): - Block.dump(self, "RDBlock") - - print(" size: %d" % self.size) - print(" host_id: %d" % self.host_id) - print(" next: %s" % self._dump_ptr(self.next)) - n = len(self.block_pairs) // 2 - o = 0 - for i in range(n): - print(" bad=%d good=%d" % (self.block_pairs[o], self.block_pairs[o+1])) - o += 2 + def write(self): + self._create_data() + + self._put_long(1, self.size) + self._put_long(3, self.host_id) + self._put_long(4, self.next) + + # block_pairs: bad, good, bad, good ... 
+ off = 6 + for b in self.block_pairs: + self._put_long(off, b) + off += 1 + + Block.write(self) + + def read(self): + Block.read(self) + if not self.valid: + return False + + self.size = self._get_long(1) + self.host_id = self._get_long(3) + self.next = self._get_long(4) + + self.block_pairs = [] + off = 6 + while off < self.block_longs: + b = self._get_long(off) + if b == 0 or b == 0xFFFFFFFF: + break + self.block_pairs.append(b) + + return self.valid + + def dump(self): + Block.dump(self, "RDBlock") + + print(" size: %d" % self.size) + print(" host_id: %d" % self.host_id) + print(" next: %s" % self._dump_ptr(self.next)) + n = len(self.block_pairs) // 2 + o = 0 + for i in range(n): + print(" bad=%d good=%d" % (self.block_pairs[o], self.block_pairs[o + 1])) + o += 2 diff --git a/amitools/fs/block/rdb/FSHeaderBlock.py b/amitools/fs/block/rdb/FSHeaderBlock.py index 5ac2601d..40b19534 100644 --- a/amitools/fs/block/rdb/FSHeaderBlock.py +++ b/amitools/fs/block/rdb/FSHeaderBlock.py @@ -1,202 +1,244 @@ - - - from amitools.fs.block.Block import * import amitools.fs.DosType as DosType -class FSHeaderDeviceNode: - valid_flags = ('type', 'task', 'lock', 'handler', 'stack_size', - 'priority', 'startup', 'seg_list_blk', 'global_vec') - - def __init__(self, type=0, task=0, lock=0, handler=0, stack_size=0, priority=0, - startup=0, seg_list_blk=0, global_vec=0): - self.type = type - self.task = task - self.lock = lock - self.handler = handler - self.stack_size = stack_size - self.priority = priority - self.startup = startup - self.seg_list_blk = seg_list_blk - self.global_vec = global_vec - - def dump(self): - print("DeviceNode") - print(" type: 0x%08x" % self.type) - print(" task: 0x%08x" % self.task) - print(" lock: 0x%08x" % self.lock) - print(" handler: 0x%08x" % self.handler) - print(" stack_size: 0x%08x" % self.stack_size) - print(" seg_list_blk: 0x%08x" % self.seg_list_blk) - print(" global_vec: 0x%08x" % self.global_vec) - - def get_flags(self, patch_flags = 0x1ff): - 
res = [] - if patch_flags & 0x01 == 0x01: - res.append(('type',self.type)) - if patch_flags & 0x02 == 0x02: - res.append(('task',self.task)) - if patch_flags & 0x04 == 0x04: - res.append(('lock',self.lock)) - if patch_flags & 0x08 == 0x08: - res.append(('handler',self.handler)) - if patch_flags & 0x10 == 0x10: - res.append(('stack_size',self.stack_size)) - if patch_flags & 0x20 == 0x20: - res.append(('priority',self.priority)) - if patch_flags & 0x40 == 0x40: - res.append(('startup',self.startup)) - if patch_flags & 0x80 == 0x80: - res.append(('seg_list_blk',self.seg_list_blk)) - if patch_flags & 0x100 == 0x100: - res.append(('global_vec',self.global_vec)) - return res - - def set_flags(self, flags): - mask = 0 - for f in flags: - key = f[0] - val = int(f[1]) - if key == 'type': - self.type = val - mask |= 0x01 - elif key == 'task': - self.task = val - mask |= 0x02 - elif key == 'lock': - self.lock = val - mask |= 0x04 - elif key == 'handler': - self.handler = val - mask |= 0x08 - elif key == 'stack_size': - self.stack_size = val - mask |= 0x10 - elif key == 'priority': - self.priority = val - mask |= 0x20 - elif key == 'startup': - self.startup = val - mask |= 0x40 - elif key == 'seg_list_blk': - self.seg_list_blk = val - mask |= 0x80 - elif key == 'global_vec': - self.global_vec = val - mask |= 0x100 - else: - raise ValueError("Invalid flag: "+key) - return mask - - def get_valid_flag_names(self): - return ('type', 'task', 'lock', 'handler', 'stack_size', 'priority', 'startup', 'seg_list_blk', 'global_vec') - - def read(self, blk): - self.type = blk._get_long(11) - self.task = blk._get_long(12) - self.lock = blk._get_long(13) - self.handler = blk._get_long(14) - self.stack_size = blk._get_long(15) - self.priority = blk._get_long(16) - self.startup = blk._get_long(17) - self.seg_list_blk = blk._get_long(18) - self.global_vec = blk._get_long(19) - - def write(self, blk): - blk._put_long(11, self.type) - blk._put_long(12, self.task) - blk._put_long(13, self.lock) - 
blk._put_long(14, self.handler) - blk._put_long(15, self.stack_size) - blk._put_long(16, self.priority) - blk._put_long(17, self.startup) - blk._put_long(18, self.seg_list_blk) - blk._put_long(19, self.global_vec) - -class FSHeaderBlock(Block): - def __init__(self, blkdev, blk_num): - Block.__init__(self, blkdev, blk_num, chk_loc=2, is_type=Block.FSHD) - - def create(self, host_id=0, next=Block.no_blk, flags=0, dos_type=0, version=0, patch_flags=0, - size=64, dev_node=None): - Block.create(self) - self.size = size - self.host_id = host_id - self.next = next - self.flags = flags - - self.dos_type = dos_type - self.version = version - self.patch_flags = patch_flags - - if dev_node == None: - dev_node = FSHeaderDeviceNode() - self.dev_node = dev_node - - def write(self): - self._create_data() - - self._put_long(1, self.size) - self._put_long(3, self.host_id) - self._put_long(4, self.next) - self._put_long(5, self.flags) - - self._put_long(8, self.dos_type) - self._put_long(9, self.version) - self._put_long(10, self.patch_flags) - - self.dev_node.write(self) - - Block.write(self) - def read(self): - Block.read(self) - if not self.valid: - return False - - self.size = self._get_long(1) - self.host_id = self._get_long(3) - self.next = self._get_long(4) - self.flags = self._get_long(5) - - self.dos_type = self._get_long(8) - self.version = self._get_long(9) - self.patch_flags = self._get_long(10) - - self.dev_node = FSHeaderDeviceNode() - self.dev_node.read(self) - - return self.valid - - def get_version_tuple(self): - return ((self.version >> 16),(self.version & 0xffff)) - - def get_version_string(self): - return "%d.%d" % self.get_version_tuple() - - def get_flags(self): - return self.dev_node.get_flags(self.patch_flags) - - def get_valid_flag_names(self): - return self.dev_node.get_valid_flag_names() - - def dump(self): - Block.dump(self, "FSHeader") - - print(" size: %d" % self.size) - print(" host_id: %d" % self.host_id) - print(" next: %s" % 
self._dump_ptr(self.next)) - print(" flags: 0x%08x" % self.flags) - print(" dos_type: 0x%08x = %s" % (self.dos_type, DosType.num_to_tag_str(self.dos_type))) - print(" version: 0x%08x = %s" % (self.version, self.get_version_string())) - print(" patch_flags: 0x%08x" % self.patch_flags) - - self.dev_node.dump() +class FSHeaderDeviceNode: + valid_flags = ( + "type", + "task", + "lock", + "handler", + "stack_size", + "priority", + "startup", + "seg_list_blk", + "global_vec", + ) + + def __init__( + self, + type=0, + task=0, + lock=0, + handler=0, + stack_size=0, + priority=0, + startup=0, + seg_list_blk=0, + global_vec=0, + ): + self.type = type + self.task = task + self.lock = lock + self.handler = handler + self.stack_size = stack_size + self.priority = priority + self.startup = startup + self.seg_list_blk = seg_list_blk + self.global_vec = global_vec + + def dump(self): + print("DeviceNode") + print(" type: 0x%08x" % self.type) + print(" task: 0x%08x" % self.task) + print(" lock: 0x%08x" % self.lock) + print(" handler: 0x%08x" % self.handler) + print(" stack_size: 0x%08x" % self.stack_size) + print(" seg_list_blk: 0x%08x" % self.seg_list_blk) + print(" global_vec: 0x%08x" % self.global_vec) + + def get_flags(self, patch_flags=0x1FF): + res = [] + if patch_flags & 0x01 == 0x01: + res.append(("type", self.type)) + if patch_flags & 0x02 == 0x02: + res.append(("task", self.task)) + if patch_flags & 0x04 == 0x04: + res.append(("lock", self.lock)) + if patch_flags & 0x08 == 0x08: + res.append(("handler", self.handler)) + if patch_flags & 0x10 == 0x10: + res.append(("stack_size", self.stack_size)) + if patch_flags & 0x20 == 0x20: + res.append(("priority", self.priority)) + if patch_flags & 0x40 == 0x40: + res.append(("startup", self.startup)) + if patch_flags & 0x80 == 0x80: + res.append(("seg_list_blk", self.seg_list_blk)) + if patch_flags & 0x100 == 0x100: + res.append(("global_vec", self.global_vec)) + return res + + def set_flags(self, flags): + mask = 0 + for f in 
flags: + key = f[0] + val = int(f[1]) + if key == "type": + self.type = val + mask |= 0x01 + elif key == "task": + self.task = val + mask |= 0x02 + elif key == "lock": + self.lock = val + mask |= 0x04 + elif key == "handler": + self.handler = val + mask |= 0x08 + elif key == "stack_size": + self.stack_size = val + mask |= 0x10 + elif key == "priority": + self.priority = val + mask |= 0x20 + elif key == "startup": + self.startup = val + mask |= 0x40 + elif key == "seg_list_blk": + self.seg_list_blk = val + mask |= 0x80 + elif key == "global_vec": + self.global_vec = val + mask |= 0x100 + else: + raise ValueError("Invalid flag: " + key) + return mask + + def get_valid_flag_names(self): + return ( + "type", + "task", + "lock", + "handler", + "stack_size", + "priority", + "startup", + "seg_list_blk", + "global_vec", + ) + + def read(self, blk): + self.type = blk._get_long(11) + self.task = blk._get_long(12) + self.lock = blk._get_long(13) + self.handler = blk._get_long(14) + self.stack_size = blk._get_long(15) + self.priority = blk._get_long(16) + self.startup = blk._get_long(17) + self.seg_list_blk = blk._get_long(18) + self.global_vec = blk._get_long(19) + + def write(self, blk): + blk._put_long(11, self.type) + blk._put_long(12, self.task) + blk._put_long(13, self.lock) + blk._put_long(14, self.handler) + blk._put_long(15, self.stack_size) + blk._put_long(16, self.priority) + blk._put_long(17, self.startup) + blk._put_long(18, self.seg_list_blk) + blk._put_long(19, self.global_vec) - def set_flags(self, flags): - mask = self.dev_node.set_flags(flags) - self.patch_flags |= mask - def set_flag(self, key, value): - mask = self.dev_node.set_flags([(key,value)]) - self.patch_flags |= mask +class FSHeaderBlock(Block): + def __init__(self, blkdev, blk_num): + Block.__init__(self, blkdev, blk_num, chk_loc=2, is_type=Block.FSHD) + + def create( + self, + host_id=0, + next=Block.no_blk, + flags=0, + dos_type=0, + version=0, + patch_flags=0, + size=64, + dev_node=None, + ): + 
Block.create(self) + self.size = size + self.host_id = host_id + self.next = next + self.flags = flags + + self.dos_type = dos_type + self.version = version + self.patch_flags = patch_flags + + if dev_node == None: + dev_node = FSHeaderDeviceNode() + self.dev_node = dev_node + + def write(self): + self._create_data() + + self._put_long(1, self.size) + self._put_long(3, self.host_id) + self._put_long(4, self.next) + self._put_long(5, self.flags) + + self._put_long(8, self.dos_type) + self._put_long(9, self.version) + self._put_long(10, self.patch_flags) + + self.dev_node.write(self) + + Block.write(self) + + def read(self): + Block.read(self) + if not self.valid: + return False + + self.size = self._get_long(1) + self.host_id = self._get_long(3) + self.next = self._get_long(4) + self.flags = self._get_long(5) + + self.dos_type = self._get_long(8) + self.version = self._get_long(9) + self.patch_flags = self._get_long(10) + + self.dev_node = FSHeaderDeviceNode() + self.dev_node.read(self) + + return self.valid + + def get_version_tuple(self): + return ((self.version >> 16), (self.version & 0xFFFF)) + + def get_version_string(self): + return "%d.%d" % self.get_version_tuple() + + def get_flags(self): + return self.dev_node.get_flags(self.patch_flags) + + def get_valid_flag_names(self): + return self.dev_node.get_valid_flag_names() + + def dump(self): + Block.dump(self, "FSHeader") + + print(" size: %d" % self.size) + print(" host_id: %d" % self.host_id) + print(" next: %s" % self._dump_ptr(self.next)) + print(" flags: 0x%08x" % self.flags) + print( + " dos_type: 0x%08x = %s" + % (self.dos_type, DosType.num_to_tag_str(self.dos_type)) + ) + print( + " version: 0x%08x = %s" % (self.version, self.get_version_string()) + ) + print(" patch_flags: 0x%08x" % self.patch_flags) + + self.dev_node.dump() + + def set_flags(self, flags): + mask = self.dev_node.set_flags(flags) + self.patch_flags |= mask + + def set_flag(self, key, value): + mask = self.dev_node.set_flags([(key, 
value)]) + self.patch_flags |= mask diff --git a/amitools/fs/block/rdb/LoadSegBlock.py b/amitools/fs/block/rdb/LoadSegBlock.py index f835760f..d922b7f9 100644 --- a/amitools/fs/block/rdb/LoadSegBlock.py +++ b/amitools/fs/block/rdb/LoadSegBlock.py @@ -1,51 +1,49 @@ +from ..Block import Block +class LoadSegBlock(Block): + def __init__(self, blkdev, blk_num=0): + Block.__init__(self, blkdev, blk_num, chk_loc=2, is_type=Block.LSEG) -from ..Block import Block + def create(self, host_id=0, next=Block.no_blk, size=128): + Block.create(self) + self.size = size + self.host_id = host_id + self.next = next -class LoadSegBlock(Block): - def __init__(self, blkdev, blk_num=0): - Block.__init__(self, blkdev, blk_num, chk_loc=2, is_type=Block.LSEG) - - def create(self, host_id=0, next=Block.no_blk, size=128): - Block.create(self) - self.size = size - self.host_id = host_id - self.next = next - - def write(self): - if self.data == None: - self._create_data() - - self._put_long(1, self.size) - self._put_long(3, self.host_id) - self._put_long(4, self.next) - - Block.write(self) - - def set_data(self, data): - if self.data == None: - self._create_data() - self.data[20:20+len(data)] = data - self.size = (20 + len(data)) // 4 - - def get_data(self): - return self.data[20:20+(self.size-5)*4] - - def read(self): - Block.read(self) - if not self.valid: - return False - - self.size = self._get_long(1) - self.host_id = self._get_long(3) - self.next = self._get_long(4) - - return self.valid - - def dump(self): - Block.dump(self, "RDBlock") - - print(" size: %d" % self.size) - print(" host_id: %d" % self.host_id) - print(" next: %s" % self._dump_ptr(self.next)) + def write(self): + if self.data == None: + self._create_data() + + self._put_long(1, self.size) + self._put_long(3, self.host_id) + self._put_long(4, self.next) + + Block.write(self) + + def set_data(self, data): + if self.data == None: + self._create_data() + self.data[20 : 20 + len(data)] = data + self.size = (20 + len(data)) // 4 + 
+ def get_data(self): + return self.data[20 : 20 + (self.size - 5) * 4] + + def read(self): + Block.read(self) + if not self.valid: + return False + + self.size = self._get_long(1) + self.host_id = self._get_long(3) + self.next = self._get_long(4) + + return self.valid + + def dump(self): + Block.dump(self, "RDBlock") + + print(" size: %d" % self.size) + print(" host_id: %d" % self.host_id) + print(" next: %s" % self._dump_ptr(self.next)) diff --git a/amitools/fs/block/rdb/PartitionBlock.py b/amitools/fs/block/rdb/PartitionBlock.py index 1690cf2a..80a22fea 100644 --- a/amitools/fs/block/rdb/PartitionBlock.py +++ b/amitools/fs/block/rdb/PartitionBlock.py @@ -1,170 +1,207 @@ - - - from amitools.fs.block.Block import * import amitools.fs.DosType as DosType from amitools.fs.FSString import FSString -class PartitionDosEnv: - valid_keys = ('max_transfer', 'mask', 'num_buffer', 'reserved', 'boot_pri', 'pre_alloc', 'boot_blocks') - - def __init__(self, size=16, block_size=128, sec_org=0, surfaces=0, sec_per_blk=1, blk_per_trk=0, - reserved=2, pre_alloc=0, interleave=0, low_cyl=0, high_cyl=0, num_buffer=30, - buf_mem_type=0, max_transfer=0xffffff, mask=0x7ffffffe, boot_pri=0, dos_type=DosType.DOS0, - baud=0, control=0, boot_blocks=0): - self.size = size - self.block_size = block_size - self.sec_org = sec_org - self.surfaces = surfaces - self.sec_per_blk = sec_per_blk - self.blk_per_trk = blk_per_trk - self.reserved = reserved - self.pre_alloc = pre_alloc - self.interleave = interleave - self.low_cyl = low_cyl - self.high_cyl = high_cyl - self.num_buffer = num_buffer - self.buf_mem_type = buf_mem_type - self.max_transfer = max_transfer - self.mask = mask - self.boot_pri = boot_pri - self.dos_type = dos_type - self.baud = baud - self.control = control - self.boot_blocks = boot_blocks - - def dump(self): - print("DosEnv") - print(" size: %d" % self.size) - print(" block_size: %d" % self.block_size) - print(" sec_org: %d" % self.sec_org) - print(" surfaces: %d" % self.surfaces) 
- print(" sec_per_blk: %d" % self.sec_per_blk) - print(" blk_per_trk: %d" % self.blk_per_trk) - print(" reserved: %d" % self.reserved) - print(" pre_alloc: %d" % self.pre_alloc) - print(" interleave: %d" % self.interleave) - print(" low_cyl: %d" % self.low_cyl) - print(" high_cyl: %d" % self.high_cyl) - print(" num_buffer: %d" % self.num_buffer) - print(" buf_mem_type: 0x%08x" % self.buf_mem_type) - print(" max_transfer: 0x%08x" % self.max_transfer) - print(" mask: 0x%08x" % self.mask) - print(" boot_pri: %d" % self.boot_pri) - print(" dos_type: 0x%08x = %s" % (self.dos_type, DosType.num_to_tag_str(self.dos_type))) - print(" baud: %d" % self.baud) - print(" control: %d" % self.control) - print(" boot_blocks: %d" % self.boot_blocks) - - def read(self, blk): - self.size = blk._get_long(32) - self.block_size = blk._get_long(33) - self.sec_org = blk._get_long(34) - self.surfaces = blk._get_long(35) - self.sec_per_blk = blk._get_long(36) - self.blk_per_trk = blk._get_long(37) - self.reserved = blk._get_long(38) - self.pre_alloc = blk._get_long(39) - self.interleave = blk._get_long(40) - self.low_cyl = blk._get_long(41) - self.high_cyl = blk._get_long(42) - self.num_buffer = blk._get_long(43) - self.buf_mem_type = blk._get_long(44) - self.max_transfer = blk._get_long(45) - self.mask = blk._get_long(46) - self.boot_pri = blk._get_slong(47) - self.dos_type = blk._get_long(48) - self.baud = blk._get_long(49) - self.control = blk._get_long(50) - self.boot_blocks = blk._get_long(51) - - def write(self, blk): - blk._put_long(32, self.size) - blk._put_long(33, self.block_size) - blk._put_long(34, self.sec_org) - blk._put_long(35, self.surfaces) - blk._put_long(36, self.sec_per_blk) - blk._put_long(37, self.blk_per_trk) - blk._put_long(38, self.reserved) - blk._put_long(39, self.pre_alloc) - blk._put_long(40, self.interleave) - blk._put_long(41, self.low_cyl) - blk._put_long(42, self.high_cyl) - blk._put_long(43, self.num_buffer) - blk._put_long(44, self.buf_mem_type) - 
blk._put_long(45, self.max_transfer) - blk._put_long(46, self.mask) - blk._put_slong(47, self.boot_pri) - blk._put_long(48, self.dos_type) - blk._put_long(49, self.baud) - blk._put_long(50, self.control) - blk._put_long(51, self.boot_blocks) - -class PartitionBlock(Block): - FLAG_BOOTABLE = 1 - FLAG_NO_AUTOMOUNT = 2 - - def __init__(self, blkdev, blk_num): - Block.__init__(self, blkdev, blk_num, chk_loc=2, is_type=Block.PART) - - def create(self, drv_name, dos_env, host_id=7, next=Block.no_blk, flags=0, dev_flags=0, - size=64): - Block.create(self) - self.size = size - self.host_id = host_id - self.next = next - self.flags = flags - - self.dev_flags = dev_flags - assert isinstance(drv_name, FSString) - self.drv_name = drv_name - - if dos_env == None: - dos_env = PartitionDosEnv() - self.dos_env = dos_env - self.valid = True - - def write(self): - self._create_data() - self._put_long(1, self.size) - self._put_long(3, self.host_id) - self._put_long(4, self.next) - self._put_long(5, self.flags) - - self._put_long(8, self.dev_flags) - self._put_bstr(9, 31, self.drv_name) - - self.dos_env.write(self) - - Block.write(self) - - def read(self): - Block.read(self) - if not self.valid: - return False - - self.size = self._get_long(1) - self.host_id = self._get_long(3) - self.next = self._get_long(4) - self.flags = self._get_long(5) - - self.dev_flags = self._get_long(8) - self.drv_name = self._get_bstr(9, 31) - - self.dos_env = PartitionDosEnv() - self.dos_env.read(self) - - return self.valid - - def dump(self): - Block.dump(self, "Partition") +class PartitionDosEnv: + valid_keys = ( + "max_transfer", + "mask", + "num_buffer", + "reserved", + "boot_pri", + "pre_alloc", + "boot_blocks", + ) + + def __init__( + self, + size=16, + block_size=128, + sec_org=0, + surfaces=0, + sec_per_blk=1, + blk_per_trk=0, + reserved=2, + pre_alloc=0, + interleave=0, + low_cyl=0, + high_cyl=0, + num_buffer=30, + buf_mem_type=0, + max_transfer=0xFFFFFF, + mask=0x7FFFFFFE, + boot_pri=0, + 
dos_type=DosType.DOS0, + baud=0, + control=0, + boot_blocks=0, + ): + self.size = size + self.block_size = block_size + self.sec_org = sec_org + self.surfaces = surfaces + self.sec_per_blk = sec_per_blk + self.blk_per_trk = blk_per_trk + self.reserved = reserved + self.pre_alloc = pre_alloc + self.interleave = interleave + self.low_cyl = low_cyl + self.high_cyl = high_cyl + self.num_buffer = num_buffer + self.buf_mem_type = buf_mem_type + self.max_transfer = max_transfer + self.mask = mask + self.boot_pri = boot_pri + self.dos_type = dos_type + self.baud = baud + self.control = control + self.boot_blocks = boot_blocks + + def dump(self): + print("DosEnv") + print(" size: %d" % self.size) + print(" block_size: %d" % self.block_size) + print(" sec_org: %d" % self.sec_org) + print(" surfaces: %d" % self.surfaces) + print(" sec_per_blk: %d" % self.sec_per_blk) + print(" blk_per_trk: %d" % self.blk_per_trk) + print(" reserved: %d" % self.reserved) + print(" pre_alloc: %d" % self.pre_alloc) + print(" interleave: %d" % self.interleave) + print(" low_cyl: %d" % self.low_cyl) + print(" high_cyl: %d" % self.high_cyl) + print(" num_buffer: %d" % self.num_buffer) + print(" buf_mem_type: 0x%08x" % self.buf_mem_type) + print(" max_transfer: 0x%08x" % self.max_transfer) + print(" mask: 0x%08x" % self.mask) + print(" boot_pri: %d" % self.boot_pri) + print( + " dos_type: 0x%08x = %s" + % (self.dos_type, DosType.num_to_tag_str(self.dos_type)) + ) + print(" baud: %d" % self.baud) + print(" control: %d" % self.control) + print(" boot_blocks: %d" % self.boot_blocks) + + def read(self, blk): + self.size = blk._get_long(32) + self.block_size = blk._get_long(33) + self.sec_org = blk._get_long(34) + self.surfaces = blk._get_long(35) + self.sec_per_blk = blk._get_long(36) + self.blk_per_trk = blk._get_long(37) + self.reserved = blk._get_long(38) + self.pre_alloc = blk._get_long(39) + self.interleave = blk._get_long(40) + self.low_cyl = blk._get_long(41) + self.high_cyl = blk._get_long(42) + 
self.num_buffer = blk._get_long(43) + self.buf_mem_type = blk._get_long(44) + self.max_transfer = blk._get_long(45) + self.mask = blk._get_long(46) + self.boot_pri = blk._get_slong(47) + self.dos_type = blk._get_long(48) + self.baud = blk._get_long(49) + self.control = blk._get_long(50) + self.boot_blocks = blk._get_long(51) + + def write(self, blk): + blk._put_long(32, self.size) + blk._put_long(33, self.block_size) + blk._put_long(34, self.sec_org) + blk._put_long(35, self.surfaces) + blk._put_long(36, self.sec_per_blk) + blk._put_long(37, self.blk_per_trk) + blk._put_long(38, self.reserved) + blk._put_long(39, self.pre_alloc) + blk._put_long(40, self.interleave) + blk._put_long(41, self.low_cyl) + blk._put_long(42, self.high_cyl) + blk._put_long(43, self.num_buffer) + blk._put_long(44, self.buf_mem_type) + blk._put_long(45, self.max_transfer) + blk._put_long(46, self.mask) + blk._put_slong(47, self.boot_pri) + blk._put_long(48, self.dos_type) + blk._put_long(49, self.baud) + blk._put_long(50, self.control) + blk._put_long(51, self.boot_blocks) - print(" size: %d" % self.size) - print(" host_id: %d" % self.host_id) - print(" next: %s" % self._dump_ptr(self.next)) - print(" flags: 0x%08x" % self.flags) - print(" dev_flags: 0x%08x" % self.dev_flags) - print(" drv_name: '%s'" % self.drv_name) - self.dos_env.dump() +class PartitionBlock(Block): + FLAG_BOOTABLE = 1 + FLAG_NO_AUTOMOUNT = 2 + + def __init__(self, blkdev, blk_num): + Block.__init__(self, blkdev, blk_num, chk_loc=2, is_type=Block.PART) + + def create( + self, + drv_name, + dos_env, + host_id=7, + next=Block.no_blk, + flags=0, + dev_flags=0, + size=64, + ): + Block.create(self) + self.size = size + self.host_id = host_id + self.next = next + self.flags = flags + + self.dev_flags = dev_flags + assert isinstance(drv_name, FSString) + self.drv_name = drv_name + + if dos_env == None: + dos_env = PartitionDosEnv() + self.dos_env = dos_env + self.valid = True + + def write(self): + self._create_data() + + 
self._put_long(1, self.size) + self._put_long(3, self.host_id) + self._put_long(4, self.next) + self._put_long(5, self.flags) + + self._put_long(8, self.dev_flags) + self._put_bstr(9, 31, self.drv_name) + + self.dos_env.write(self) + + Block.write(self) + + def read(self): + Block.read(self) + if not self.valid: + return False + + self.size = self._get_long(1) + self.host_id = self._get_long(3) + self.next = self._get_long(4) + self.flags = self._get_long(5) + + self.dev_flags = self._get_long(8) + self.drv_name = self._get_bstr(9, 31) + + self.dos_env = PartitionDosEnv() + self.dos_env.read(self) + + return self.valid + + def dump(self): + Block.dump(self, "Partition") + + print(" size: %d" % self.size) + print(" host_id: %d" % self.host_id) + print(" next: %s" % self._dump_ptr(self.next)) + print(" flags: 0x%08x" % self.flags) + print(" dev_flags: 0x%08x" % self.dev_flags) + print(" drv_name: '%s'" % self.drv_name) + + self.dos_env.dump() diff --git a/amitools/fs/block/rdb/RDBlock.py b/amitools/fs/block/rdb/RDBlock.py index fb4ea645..0645dd15 100644 --- a/amitools/fs/block/rdb/RDBlock.py +++ b/amitools/fs/block/rdb/RDBlock.py @@ -1,217 +1,249 @@ - - - from ..Block import Block from amitools.fs.FSString import FSString + class RDBPhysicalDrive: - def __init__(self, cyls=0, heads=0, secs=0, - interleave=1, parking_zone=-1, write_pre_comp=-1, reduced_write=-1, step_rate=3): - if parking_zone == -1: - parking_zone = cyls - if write_pre_comp == -1: - write_pre_comp = cyls - if reduced_write == -1: - reduced_write = cyls - - self.cyls = cyls - self.heads = heads - self.secs = secs - self.interleave = interleave - self.parking_zone = parking_zone - self.write_pre_comp = write_pre_comp - self.reduced_write = reduced_write - self.step_rate = step_rate - - def dump(self): - print("PhysicalDrive") - print(" cyls: %d" % self.cyls) - print(" heads: %d" % self.heads) - print(" secs: %d" % self.secs) - print(" interleave: %d" % self.interleave) - print(" parking_zone: %d" % 
self.parking_zone) - print(" write_pre_comp: %d" % self.write_pre_comp) - print(" reduced_write: %d" % self.reduced_write) - print(" step_rate: %d" % self.step_rate) - - def read(self, blk): - self.cyls = blk._get_long(16) - self.secs = blk._get_long(17) - self.heads = blk._get_long(18) - self.interleave = blk._get_long(19) - self.parking_zone = blk._get_long(20) - - self.write_pre_comp = blk._get_long(24) - self.reduced_write = blk._get_long(25) - self.step_rate = blk._get_long(26) - - def write(self, blk): - blk._put_long(16, self.cyls) - blk._put_long(17, self.secs) - blk._put_long(18, self.heads) - blk._put_long(19, self.interleave) - blk._put_long(20, self.parking_zone) - - blk._put_long(24, self.write_pre_comp) - blk._put_long(25, self.reduced_write) - blk._put_long(26, self.step_rate) + def __init__( + self, + cyls=0, + heads=0, + secs=0, + interleave=1, + parking_zone=-1, + write_pre_comp=-1, + reduced_write=-1, + step_rate=3, + ): + if parking_zone == -1: + parking_zone = cyls + if write_pre_comp == -1: + write_pre_comp = cyls + if reduced_write == -1: + reduced_write = cyls + + self.cyls = cyls + self.heads = heads + self.secs = secs + self.interleave = interleave + self.parking_zone = parking_zone + self.write_pre_comp = write_pre_comp + self.reduced_write = reduced_write + self.step_rate = step_rate + + def dump(self): + print("PhysicalDrive") + print(" cyls: %d" % self.cyls) + print(" heads: %d" % self.heads) + print(" secs: %d" % self.secs) + print(" interleave: %d" % self.interleave) + print(" parking_zone: %d" % self.parking_zone) + print(" write_pre_comp: %d" % self.write_pre_comp) + print(" reduced_write: %d" % self.reduced_write) + print(" step_rate: %d" % self.step_rate) + + def read(self, blk): + self.cyls = blk._get_long(16) + self.secs = blk._get_long(17) + self.heads = blk._get_long(18) + self.interleave = blk._get_long(19) + self.parking_zone = blk._get_long(20) + + self.write_pre_comp = blk._get_long(24) + self.reduced_write = 
blk._get_long(25) + self.step_rate = blk._get_long(26) + + def write(self, blk): + blk._put_long(16, self.cyls) + blk._put_long(17, self.secs) + blk._put_long(18, self.heads) + blk._put_long(19, self.interleave) + blk._put_long(20, self.parking_zone) + + blk._put_long(24, self.write_pre_comp) + blk._put_long(25, self.reduced_write) + blk._put_long(26, self.step_rate) class RDBLogicalDrive: - def __init__(self, rdb_blk_lo=0, rdb_blk_hi=0, lo_cyl=0, hi_cyl=0, - cyl_blks=0, high_rdsk_blk=0, auto_park_secs=0): - self.rdb_blk_lo = rdb_blk_lo - self.rdb_blk_hi = rdb_blk_hi - self.lo_cyl = lo_cyl - self.hi_cyl = hi_cyl - self.cyl_blks = cyl_blks - self.high_rdsk_blk = high_rdsk_blk - self.auto_park_secs = auto_park_secs - - def dump(self): - print("LogicalDrive") - print(" rdb_blk_lo: %d" % self.rdb_blk_lo) - print(" rdb_blk_hi: %d" % self.rdb_blk_hi) - print(" lo_cyl: %d" % self.lo_cyl) - print(" hi_cyl: %d" % self.hi_cyl) - print(" cyl_blks: %d" % self.cyl_blks) - print(" high_rdsk_blk: %d" % self.high_rdsk_blk) - print(" auto_park_secs: %d" % self.auto_park_secs) - - def read(self, blk): - self.rdb_blk_lo = blk._get_long(32) - self.rdb_blk_hi = blk._get_long(33) - self.lo_cyl = blk._get_long(34) - self.hi_cyl = blk._get_long(35) - self.cyl_blks = blk._get_long(36) - self.auto_park_secs = blk._get_long(37) - self.high_rdsk_blk = blk._get_long(38) - - def write(self, blk): - blk._put_long(32, self.rdb_blk_lo) - blk._put_long(33, self.rdb_blk_hi) - blk._put_long(34, self.lo_cyl) - blk._put_long(35, self.hi_cyl) - blk._put_long(36, self.cyl_blks) - blk._put_long(37, self.auto_park_secs) - blk._put_long(38, self.high_rdsk_blk) + def __init__( + self, + rdb_blk_lo=0, + rdb_blk_hi=0, + lo_cyl=0, + hi_cyl=0, + cyl_blks=0, + high_rdsk_blk=0, + auto_park_secs=0, + ): + self.rdb_blk_lo = rdb_blk_lo + self.rdb_blk_hi = rdb_blk_hi + self.lo_cyl = lo_cyl + self.hi_cyl = hi_cyl + self.cyl_blks = cyl_blks + self.high_rdsk_blk = high_rdsk_blk + self.auto_park_secs = auto_park_secs + + 
def dump(self): + print("LogicalDrive") + print(" rdb_blk_lo: %d" % self.rdb_blk_lo) + print(" rdb_blk_hi: %d" % self.rdb_blk_hi) + print(" lo_cyl: %d" % self.lo_cyl) + print(" hi_cyl: %d" % self.hi_cyl) + print(" cyl_blks: %d" % self.cyl_blks) + print(" high_rdsk_blk: %d" % self.high_rdsk_blk) + print(" auto_park_secs: %d" % self.auto_park_secs) + + def read(self, blk): + self.rdb_blk_lo = blk._get_long(32) + self.rdb_blk_hi = blk._get_long(33) + self.lo_cyl = blk._get_long(34) + self.hi_cyl = blk._get_long(35) + self.cyl_blks = blk._get_long(36) + self.auto_park_secs = blk._get_long(37) + self.high_rdsk_blk = blk._get_long(38) + + def write(self, blk): + blk._put_long(32, self.rdb_blk_lo) + blk._put_long(33, self.rdb_blk_hi) + blk._put_long(34, self.lo_cyl) + blk._put_long(35, self.hi_cyl) + blk._put_long(36, self.cyl_blks) + blk._put_long(37, self.auto_park_secs) + blk._put_long(38, self.high_rdsk_blk) class RDBDriveID: - def __init__(self, disk_vendor="", disk_product="", disk_revision="", - ctrl_vendor="", ctrl_product="", ctrl_revision=""): - self.disk_vendor = FSString(disk_vendor) - self.disk_product = FSString(disk_product) - self.disk_revision = FSString(disk_revision) - self.ctrl_vendor = FSString(ctrl_vendor) - self.ctrl_product = FSString(ctrl_product) - self.ctrl_revision = FSString(ctrl_revision) - - def dump(self): - print("DriveID") - print(" disk_vendor: '%s'" % self.disk_vendor) - print(" disk_product: '%s'" % self.disk_product) - print(" disk_revision: '%s'" % self.disk_revision) - print(" ctrl_vendor: '%s'" % self.ctrl_vendor) - print(" ctrl_product: '%s'" % self.ctrl_product) - print(" ctrl_revision: '%s'" % self.ctrl_revision) - - def read(self, blk): - self.disk_vendor = blk._get_cstr(40, 8) - self.disk_product = blk._get_cstr(42, 16) - self.disk_revision = blk._get_cstr(46, 4) - self.ctrl_vendor = blk._get_cstr(47, 8) - self.ctrl_product = blk._get_cstr(49, 16) - self.ctrl_revision = blk._get_cstr(53, 4) - - def write(self, blk): - 
blk._put_cstr(40, 8, self.disk_vendor) - blk._put_cstr(42, 16, self.disk_product) - blk._put_cstr(46, 4, self.disk_revision) - blk._put_cstr(47, 8, self.ctrl_vendor) - blk._put_cstr(49, 16, self.ctrl_product) - blk._put_cstr(53, 4, self.ctrl_revision) + def __init__( + self, + disk_vendor="", + disk_product="", + disk_revision="", + ctrl_vendor="", + ctrl_product="", + ctrl_revision="", + ): + self.disk_vendor = FSString(disk_vendor) + self.disk_product = FSString(disk_product) + self.disk_revision = FSString(disk_revision) + self.ctrl_vendor = FSString(ctrl_vendor) + self.ctrl_product = FSString(ctrl_product) + self.ctrl_revision = FSString(ctrl_revision) + + def dump(self): + print("DriveID") + print(" disk_vendor: '%s'" % self.disk_vendor) + print(" disk_product: '%s'" % self.disk_product) + print(" disk_revision: '%s'" % self.disk_revision) + print(" ctrl_vendor: '%s'" % self.ctrl_vendor) + print(" ctrl_product: '%s'" % self.ctrl_product) + print(" ctrl_revision: '%s'" % self.ctrl_revision) + + def read(self, blk): + self.disk_vendor = blk._get_cstr(40, 8) + self.disk_product = blk._get_cstr(42, 16) + self.disk_revision = blk._get_cstr(46, 4) + self.ctrl_vendor = blk._get_cstr(47, 8) + self.ctrl_product = blk._get_cstr(49, 16) + self.ctrl_revision = blk._get_cstr(53, 4) + + def write(self, blk): + blk._put_cstr(40, 8, self.disk_vendor) + blk._put_cstr(42, 16, self.disk_product) + blk._put_cstr(46, 4, self.disk_revision) + blk._put_cstr(47, 8, self.ctrl_vendor) + blk._put_cstr(49, 16, self.ctrl_product) + blk._put_cstr(53, 4, self.ctrl_revision) class RDBlock(Block): - def __init__(self, blkdev, blk_num=0): - Block.__init__(self, blkdev, blk_num, chk_loc=2, is_type=Block.RDSK) - - def create(self, phy_drv, log_drv, drv_id, - host_id=7, block_size=512, flags=0x17, - badblk_list=Block.no_blk, part_list=Block.no_blk, fs_list=Block.no_blk, init_code=Block.no_blk, - size=64): - Block.create(self) - self.size = size - self.host_id = host_id - self.block_size = 
block_size - self.flags = flags - - self.badblk_list = badblk_list - self.part_list = part_list - self.fs_list = fs_list - self.init_code = init_code - - self.phy_drv = phy_drv - self.log_drv = log_drv - self.drv_id = drv_id - self.valid = True - - def write(self): - self._create_data() - - self._put_long(1, self.size) - self._put_long(3, self.host_id) - self._put_long(4, self.block_size) - self._put_long(5, self.flags) - - self._put_long(6, self.badblk_list) - self._put_long(7, self.part_list) - self._put_long(8, self.fs_list) - self._put_long(9, self.init_code) - - self.phy_drv.write(self) - self.log_drv.write(self) - self.drv_id.write(self) - - Block.write(self) - - def read(self): - Block.read(self) - if not self.valid: - return False - - self.size = self._get_long(1) - self.host_id = self._get_long(3) - self.block_size = self._get_long(4) - self.flags = self._get_long(5) - - self.badblk_list = self._get_long(6) - self.part_list = self._get_long(7) - self.fs_list = self._get_long(8) - self.init_code = self._get_long(9) - - self.phy_drv = RDBPhysicalDrive() - self.phy_drv.read(self) - self.log_drv = RDBLogicalDrive() - self.log_drv.read(self) - self.drv_id = RDBDriveID() - self.drv_id.read(self) - - return self.valid - - def dump(self): - Block.dump(self, "RigidDisk") - - print(" size: %d" % self.size) - print(" host_id: %d" % self.host_id) - print(" block_size: %d" % self.block_size) - print(" flags: 0x%08x" % self.flags) - print(" badblk_list: %s" % self._dump_ptr(self.badblk_list)) - print(" part_list: %s" % self._dump_ptr(self.part_list)) - print(" fs_list: %s" % self._dump_ptr(self.fs_list)) - print(" init_code: %s" % self._dump_ptr(self.init_code)) - - self.phy_drv.dump() - self.log_drv.dump() - self.drv_id.dump() + def __init__(self, blkdev, blk_num=0): + Block.__init__(self, blkdev, blk_num, chk_loc=2, is_type=Block.RDSK) + + def create( + self, + phy_drv, + log_drv, + drv_id, + host_id=7, + block_size=512, + flags=0x17, + badblk_list=Block.no_blk, + 
part_list=Block.no_blk, + fs_list=Block.no_blk, + init_code=Block.no_blk, + size=64, + ): + Block.create(self) + self.size = size + self.host_id = host_id + self.block_size = block_size + self.flags = flags + + self.badblk_list = badblk_list + self.part_list = part_list + self.fs_list = fs_list + self.init_code = init_code + + self.phy_drv = phy_drv + self.log_drv = log_drv + self.drv_id = drv_id + self.valid = True + + def write(self): + self._create_data() + + self._put_long(1, self.size) + self._put_long(3, self.host_id) + self._put_long(4, self.block_size) + self._put_long(5, self.flags) + + self._put_long(6, self.badblk_list) + self._put_long(7, self.part_list) + self._put_long(8, self.fs_list) + self._put_long(9, self.init_code) + + self.phy_drv.write(self) + self.log_drv.write(self) + self.drv_id.write(self) + + Block.write(self) + + def read(self): + Block.read(self) + if not self.valid: + return False + + self.size = self._get_long(1) + self.host_id = self._get_long(3) + self.block_size = self._get_long(4) + self.flags = self._get_long(5) + + self.badblk_list = self._get_long(6) + self.part_list = self._get_long(7) + self.fs_list = self._get_long(8) + self.init_code = self._get_long(9) + + self.phy_drv = RDBPhysicalDrive() + self.phy_drv.read(self) + self.log_drv = RDBLogicalDrive() + self.log_drv.read(self) + self.drv_id = RDBDriveID() + self.drv_id.read(self) + + return self.valid + + def dump(self): + Block.dump(self, "RigidDisk") + + print(" size: %d" % self.size) + print(" host_id: %d" % self.host_id) + print(" block_size: %d" % self.block_size) + print(" flags: 0x%08x" % self.flags) + print(" badblk_list: %s" % self._dump_ptr(self.badblk_list)) + print(" part_list: %s" % self._dump_ptr(self.part_list)) + print(" fs_list: %s" % self._dump_ptr(self.fs_list)) + print(" init_code: %s" % self._dump_ptr(self.init_code)) + + self.phy_drv.dump() + self.log_drv.dump() + self.drv_id.dump() diff --git a/amitools/fs/rdb/FileSystem.py 
b/amitools/fs/rdb/FileSystem.py index 263c30ce..38910ed0 100644 --- a/amitools/fs/rdb/FileSystem.py +++ b/amitools/fs/rdb/FileSystem.py @@ -1,149 +1,153 @@ - - - from amitools.fs.block.rdb.FSHeaderBlock import * from amitools.fs.block.rdb.LoadSegBlock import * from amitools.util.HexDump import * import amitools.fs.DosType as DosType + class FileSystem: - def __init__(self, blkdev, blk_num, num): - self.blkdev = blkdev - self.blk_num = blk_num - self.num = num - self.fshd = None - self.valid = False - self.lsegs = [] - self.data = None - - def get_next_fs_blk(self): - if self.fshd != None: - return self.fshd.next - else: - return 0xffffffff - - def get_blk_nums(self): - res = [self.blk_num] - for ls in self.lsegs: - res.append(ls.blk_num) - return res - - def read(self): - # read fs header - self.fshd = FSHeaderBlock(self.blkdev, self.blk_num) - if not self.fshd.read(): - self.valid = False - return False - # read lseg blocks - lseg_blk = self.fshd.dev_node.seg_list_blk - self.lsegs = [] - data = b"" - while lseg_blk != 0xffffffff: - ls = LoadSegBlock(self.blkdev, lseg_blk) - if not ls.read(): + def __init__(self, blkdev, blk_num, num): + self.blkdev = blkdev + self.blk_num = blk_num + self.num = num + self.fshd = None self.valid = False - return False - lseg_blk = ls.next - data += ls.get_data() - self.lsegs.append(ls) - self.data = data - return True - - def get_data(self): - return self.data - - # ----- create ------ - - def get_total_blocks(self, data): - size = len(data) - lseg_size = self.blkdev.block_bytes - 20 - num_lseg = int((size + lseg_size - 1)/lseg_size) - return num_lseg + 1 - - def create(self, blks, data, version, dos_type, dev_flags=None): - self.data = data - # create fs header - self.fshd = FSHeaderBlock(self.blkdev, self.blk_num) - self.fshd.create(version=version, dos_type=dos_type) - # store begin of seg list - self.fshd.set_flag('seg_list_blk',blks[0]) - self.fshd.set_flag('global_vec', 0xffffffff) - # add custom flags - if dev_flags is not 
None: - for p in dev_flags: - self.fshd.set_flag(p[0], p[1]) - # create lseg blocks - self.lsegs = [] - lseg_size = self.blkdev.block_bytes - 20 - off = 0 - size = len(data) - blk_off = 0 - while(off < size): - blk_len = size - off - if blk_len > lseg_size: - blk_len = lseg_size - blk_data = data[off:off+blk_len] - # create new lseg block - ls = LoadSegBlock(self.blkdev, blks[blk_off]) - # get next block - if blk_off == len(blks)-1: - next = Block.no_blk - else: - next = blks[blk_off+1] - ls.create(next=next) - ls.set_data(blk_data) - self.lsegs.append(ls) - # next round - off += blk_len - blk_off += 1 - - def write(self, only_fshd=False): - self.fshd.write() - if not only_fshd: - for lseg in self.lsegs: - lseg.write() - - # ----- query ----- - - def dump(self, hex_dump=False): - if self.fshd != None: - self.fshd.dump() - # only dump ids of lseg blocks - print("LoadSegBlocks:") - ids = [] - for ls in self.lsegs: - ids.append(str(ls.blk_num)) - print(" lseg blks: %s" % ",".join(ids)) - print(" data size: %d" % len(self.data)) - if hex_dump: - print_hex(self.data) - - def get_flags_info(self): - flags = self.fshd.get_flags() - res = [] - for f in flags: - res.append("%s=0x%x" % f) - return " ".join(res) - - def get_valid_flag_names(self): - return self.fshd.get_valid_flag_names() - - def get_info(self): - flags = self.get_flags_info() - dt = self.fshd.dos_type - dt_str = DosType.num_to_tag_str(dt) - return "FileSystem #%d %s/0x%04x version=%s size=%d %s" % (self.num, dt_str, dt, self.fshd.get_version_string(), len(self.data), flags) - - # ----- edit ----- - - def clear_flags(self): - self.fshd.patch_flags = 0 - self.fshd.write() - return True - - def set_flags(self, flags, clear=False): - if clear: - self.fshd.patch_flags = 0 - self.fshd.set_flags(flags) - self.fshd.write() - + self.lsegs = [] + self.data = None + + def get_next_fs_blk(self): + if self.fshd != None: + return self.fshd.next + else: + return 0xFFFFFFFF + + def get_blk_nums(self): + res = [self.blk_num] 
+ for ls in self.lsegs: + res.append(ls.blk_num) + return res + + def read(self): + # read fs header + self.fshd = FSHeaderBlock(self.blkdev, self.blk_num) + if not self.fshd.read(): + self.valid = False + return False + # read lseg blocks + lseg_blk = self.fshd.dev_node.seg_list_blk + self.lsegs = [] + data = b"" + while lseg_blk != 0xFFFFFFFF: + ls = LoadSegBlock(self.blkdev, lseg_blk) + if not ls.read(): + self.valid = False + return False + lseg_blk = ls.next + data += ls.get_data() + self.lsegs.append(ls) + self.data = data + return True + + def get_data(self): + return self.data + + # ----- create ------ + + def get_total_blocks(self, data): + size = len(data) + lseg_size = self.blkdev.block_bytes - 20 + num_lseg = int((size + lseg_size - 1) / lseg_size) + return num_lseg + 1 + + def create(self, blks, data, version, dos_type, dev_flags=None): + self.data = data + # create fs header + self.fshd = FSHeaderBlock(self.blkdev, self.blk_num) + self.fshd.create(version=version, dos_type=dos_type) + # store begin of seg list + self.fshd.set_flag("seg_list_blk", blks[0]) + self.fshd.set_flag("global_vec", 0xFFFFFFFF) + # add custom flags + if dev_flags is not None: + for p in dev_flags: + self.fshd.set_flag(p[0], p[1]) + # create lseg blocks + self.lsegs = [] + lseg_size = self.blkdev.block_bytes - 20 + off = 0 + size = len(data) + blk_off = 0 + while off < size: + blk_len = size - off + if blk_len > lseg_size: + blk_len = lseg_size + blk_data = data[off : off + blk_len] + # create new lseg block + ls = LoadSegBlock(self.blkdev, blks[blk_off]) + # get next block + if blk_off == len(blks) - 1: + next = Block.no_blk + else: + next = blks[blk_off + 1] + ls.create(next=next) + ls.set_data(blk_data) + self.lsegs.append(ls) + # next round + off += blk_len + blk_off += 1 + + def write(self, only_fshd=False): + self.fshd.write() + if not only_fshd: + for lseg in self.lsegs: + lseg.write() + + # ----- query ----- + + def dump(self, hex_dump=False): + if self.fshd != None: + 
self.fshd.dump() + # only dump ids of lseg blocks + print("LoadSegBlocks:") + ids = [] + for ls in self.lsegs: + ids.append(str(ls.blk_num)) + print(" lseg blks: %s" % ",".join(ids)) + print(" data size: %d" % len(self.data)) + if hex_dump: + print_hex(self.data) + + def get_flags_info(self): + flags = self.fshd.get_flags() + res = [] + for f in flags: + res.append("%s=0x%x" % f) + return " ".join(res) + + def get_valid_flag_names(self): + return self.fshd.get_valid_flag_names() + + def get_info(self): + flags = self.get_flags_info() + dt = self.fshd.dos_type + dt_str = DosType.num_to_tag_str(dt) + return "FileSystem #%d %s/0x%04x version=%s size=%d %s" % ( + self.num, + dt_str, + dt, + self.fshd.get_version_string(), + len(self.data), + flags, + ) + + # ----- edit ----- + + def clear_flags(self): + self.fshd.patch_flags = 0 + self.fshd.write() + return True + + def set_flags(self, flags, clear=False): + if clear: + self.fshd.patch_flags = 0 + self.fshd.set_flags(flags) + self.fshd.write() diff --git a/amitools/fs/rdb/Partition.py b/amitools/fs/rdb/Partition.py index 599dfd8d..0134213c 100644 --- a/amitools/fs/rdb/Partition.py +++ b/amitools/fs/rdb/Partition.py @@ -3,112 +3,122 @@ import amitools.util.ByteSize as ByteSize import amitools.fs.DosType as DosType + class Partition: - def __init__(self, blkdev, blk_num, num, cyl_blks, rdisk): - self.blkdev = blkdev - self.blk_num = blk_num - self.num = num - self.cyl_blks = cyl_blks - self.rdisk = rdisk - self.block_bytes = rdisk.block_bytes - self.part_blk = None - - def get_next_partition_blk(self): - if self.part_blk != None: - return self.part_blk.next - else: - return 0xffffffff - - def get_blk_num(self): - """return the block number of the partition block""" - return self.blk_num - - def read(self): - # read fs header - self.part_blk = PartitionBlock(self.blkdev, self.blk_num) - if not self.part_blk.read(): - self.valid = False - return False - self.valid = True - return True - - def create_blkdev(self, 
auto_close_rdb_blkdev=False): - """create a block device for accessing this partition""" - return PartBlockDevice(self.blkdev, self.part_blk, auto_close_rdb_blkdev) - - def write(self): - self.part_blk.write() - - # ----- Query ----- - - def dump(self): - self.part_blk.dump() - - def get_num_cyls(self): - p = self.part_blk - return p.dos_env.high_cyl - p.dos_env.low_cyl + 1 - - def get_num_blocks(self): - """return total number of blocks in this partition""" - return self.get_num_cyls() * self.cyl_blks - - def get_num_bytes(self): - return self.get_num_blocks() * self.block_bytes - - def get_drive_name(self): - return self.part_blk.drv_name - - def get_flags(self): - return self.part_blk.flags - - def get_index(self): - return self.num - - def get_cyl_range(self): - de = self.part_blk.dos_env - if de == None: - return None - else: - return (de.low_cyl, de.high_cyl) - - def get_info(self, total_blks=0): - """return a string line with typical info about this partition""" - p = self.part_blk - de = p.dos_env - name = "'%s'" % p.drv_name - part_blks = self.get_num_blocks() - part_bytes = self.get_num_bytes() - extra = "" - if total_blks != 0: - ratio = 100.0 * part_blks / total_blks - extra += "%6.2f%% " % ratio - # add dos type - dos_type = de.dos_type - extra += DosType.num_to_tag_str(dos_type) - extra += "/0x%04x" % dos_type - return "Partition: #%d %-06s %8d %8d %10d %s %s" \ - % (self.num, name, de.low_cyl, de.high_cyl, part_blks, ByteSize.to_byte_size_str(part_bytes), extra) - - def get_extra_infos(self): - result = [] - p = self.part_blk - de = p.dos_env - # layout - result.append("blk_longs=%d, sec/blk=%d, surf=%d, blk/trk=%d" % \ - (de.block_size, de.sec_per_blk, de.surfaces, de.blk_per_trk)) - result.append("fs_block_size=%d" % (de.block_size * 4 * de.sec_per_blk)) - # max transfer - result.append("max_transfer=0x%x" % de.max_transfer) - result.append("mask=0x%x" % de.mask) - result.append("num_buffer=%d" % de.num_buffer) - # add flags - flags = p.flags - if 
flags & PartitionBlock.FLAG_BOOTABLE == PartitionBlock.FLAG_BOOTABLE: - result.append("bootable=1 pri=%d" % de.boot_pri) - else: - result.append("bootable=0") - if flags & PartitionBlock.FLAG_NO_AUTOMOUNT == PartitionBlock.FLAG_NO_AUTOMOUNT: - result.append("automount=0") - else: - result.append("automount=1") - return result + def __init__(self, blkdev, blk_num, num, cyl_blks, rdisk): + self.blkdev = blkdev + self.blk_num = blk_num + self.num = num + self.cyl_blks = cyl_blks + self.rdisk = rdisk + self.block_bytes = rdisk.block_bytes + self.part_blk = None + + def get_next_partition_blk(self): + if self.part_blk != None: + return self.part_blk.next + else: + return 0xFFFFFFFF + + def get_blk_num(self): + """return the block number of the partition block""" + return self.blk_num + + def read(self): + # read fs header + self.part_blk = PartitionBlock(self.blkdev, self.blk_num) + if not self.part_blk.read(): + self.valid = False + return False + self.valid = True + return True + + def create_blkdev(self, auto_close_rdb_blkdev=False): + """create a block device for accessing this partition""" + return PartBlockDevice(self.blkdev, self.part_blk, auto_close_rdb_blkdev) + + def write(self): + self.part_blk.write() + + # ----- Query ----- + + def dump(self): + self.part_blk.dump() + + def get_num_cyls(self): + p = self.part_blk + return p.dos_env.high_cyl - p.dos_env.low_cyl + 1 + + def get_num_blocks(self): + """return total number of blocks in this partition""" + return self.get_num_cyls() * self.cyl_blks + + def get_num_bytes(self): + return self.get_num_blocks() * self.block_bytes + + def get_drive_name(self): + return self.part_blk.drv_name + + def get_flags(self): + return self.part_blk.flags + + def get_index(self): + return self.num + + def get_cyl_range(self): + de = self.part_blk.dos_env + if de == None: + return None + else: + return (de.low_cyl, de.high_cyl) + + def get_info(self, total_blks=0): + """return a string line with typical info about this 
partition""" + p = self.part_blk + de = p.dos_env + name = "'%s'" % p.drv_name + part_blks = self.get_num_blocks() + part_bytes = self.get_num_bytes() + extra = "" + if total_blks != 0: + ratio = 100.0 * part_blks / total_blks + extra += "%6.2f%% " % ratio + # add dos type + dos_type = de.dos_type + extra += DosType.num_to_tag_str(dos_type) + extra += "/0x%04x" % dos_type + return "Partition: #%d %-06s %8d %8d %10d %s %s" % ( + self.num, + name, + de.low_cyl, + de.high_cyl, + part_blks, + ByteSize.to_byte_size_str(part_bytes), + extra, + ) + + def get_extra_infos(self): + result = [] + p = self.part_blk + de = p.dos_env + # layout + result.append( + "blk_longs=%d, sec/blk=%d, surf=%d, blk/trk=%d" + % (de.block_size, de.sec_per_blk, de.surfaces, de.blk_per_trk) + ) + result.append("fs_block_size=%d" % (de.block_size * 4 * de.sec_per_blk)) + # max transfer + result.append("max_transfer=0x%x" % de.max_transfer) + result.append("mask=0x%x" % de.mask) + result.append("num_buffer=%d" % de.num_buffer) + # add flags + flags = p.flags + if flags & PartitionBlock.FLAG_BOOTABLE == PartitionBlock.FLAG_BOOTABLE: + result.append("bootable=1 pri=%d" % de.boot_pri) + else: + result.append("bootable=0") + if flags & PartitionBlock.FLAG_NO_AUTOMOUNT == PartitionBlock.FLAG_NO_AUTOMOUNT: + result.append("automount=0") + else: + result.append("automount=1") + return result diff --git a/amitools/fs/rdb/RDisk.py b/amitools/fs/rdb/RDisk.py index 5c261e7e..9aa0e190 100644 --- a/amitools/fs/rdb/RDisk.py +++ b/amitools/fs/rdb/RDisk.py @@ -1,6 +1,3 @@ - - - from amitools.fs.block.rdb.RDBlock import * from amitools.fs.block.rdb.PartitionBlock import * import amitools.util.ByteSize as ByteSize @@ -8,571 +5,636 @@ from .FileSystem import FileSystem from .Partition import Partition + class RDisk: - def __init__(self, rawblk): - self.rawblk = rawblk - self.valid = False - self.rdb = None - self.parts = [] - self.fs = [] - self.used_blks = [] - self.max_blks = 0 - self.block_bytes = 0 - - def 
peek_block_size(self): - self.rdb = RDBlock(self.rawblk) - if not self.rdb.read(): - self.valid = False - return None - return self.rdb.block_size - - def open(self): - # read RDB - if not self.rdb: - self.rdb = RDBlock(self.rawblk) - if not self.rdb.read(): - self.valid = False - return False - - # check block size of rdb vs. raw block device - if self.rdb.block_size != self.rawblk.block_bytes: - raise IOError("block size mismatch: rdb=%d != device=%d" % \ - (self.rdb.block_size, self.rawblk.block_bytes)) - self.block_bytes = self.rdb.block_size - - # create used block list - self.used_blks = [self.rdb.blk_num] - - # read partitions - part_blk = self.rdb.part_list - self.parts = [] - num = 0 - while part_blk != Block.no_blk: - p = Partition(self.rawblk, part_blk, num, self.rdb.log_drv.cyl_blks, self) - num += 1 - if not p.read(): + def __init__(self, rawblk): + self.rawblk = rawblk self.valid = False - return False - self.parts.append(p) - # store used block - self.used_blks.append(p.get_blk_num()) - # next partition - part_blk = p.get_next_partition_blk() - - # read filesystems - fs_blk = self.rdb.fs_list - self.fs = [] - num = 0 - while fs_blk != PartitionBlock.no_blk: - fs = FileSystem(self.rawblk, fs_blk, num) - num += 1 - if not fs.read(): + self.rdb = None + self.parts = [] + self.fs = [] + self.used_blks = [] + self.max_blks = 0 + self.block_bytes = 0 + + def peek_block_size(self): + self.rdb = RDBlock(self.rawblk) + if not self.rdb.read(): + self.valid = False + return None + return self.rdb.block_size + + def open(self): + # read RDB + if not self.rdb: + self.rdb = RDBlock(self.rawblk) + if not self.rdb.read(): + self.valid = False + return False + + # check block size of rdb vs. 
raw block device + if self.rdb.block_size != self.rawblk.block_bytes: + raise IOError( + "block size mismatch: rdb=%d != device=%d" + % (self.rdb.block_size, self.rawblk.block_bytes) + ) + self.block_bytes = self.rdb.block_size + + # create used block list + self.used_blks = [self.rdb.blk_num] + + # read partitions + part_blk = self.rdb.part_list + self.parts = [] + num = 0 + while part_blk != Block.no_blk: + p = Partition(self.rawblk, part_blk, num, self.rdb.log_drv.cyl_blks, self) + num += 1 + if not p.read(): + self.valid = False + return False + self.parts.append(p) + # store used block + self.used_blks.append(p.get_blk_num()) + # next partition + part_blk = p.get_next_partition_blk() + + # read filesystems + fs_blk = self.rdb.fs_list + self.fs = [] + num = 0 + while fs_blk != PartitionBlock.no_blk: + fs = FileSystem(self.rawblk, fs_blk, num) + num += 1 + if not fs.read(): + self.valid = False + return False + self.fs.append(fs) + # store used blocks + self.used_blks += fs.get_blk_nums() + # next partition + fs_blk = fs.get_next_fs_blk() + + # TODO: add bad block blocks + + self.valid = True + self.max_blks = self.rdb.log_drv.rdb_blk_hi + 1 + return True + + def close(self): + self.rdb = None self.valid = False - return False - self.fs.append(fs) - # store used blocks - self.used_blks += fs.get_blk_nums() - # next partition - fs_blk = fs.get_next_fs_blk() - - # TODO: add bad block blocks - - self.valid = True - self.max_blks = self.rdb.log_drv.rdb_blk_hi + 1 - return True - - def close(self): - self.rdb = None - self.valid = False - - # ----- query ----- - - def dump(self, hex_dump=False): - # rdb - if self.rdb != None: - self.rdb.dump() - # partitions - for p in self.parts: - p.dump() - # fs - for fs in self.fs: - fs.dump(hex_dump) - - def get_info(self, part_name=None): - res = [] - part = None - # only show single partition - if part_name: - part = self.find_partition_by_string(part_name) - if not part: - res.append("Partition not found: %s!" 
% part_name) + + # ----- query ----- + + def dump(self, hex_dump=False): + # rdb + if self.rdb != None: + self.rdb.dump() + # partitions + for p in self.parts: + p.dump() + # fs + for fs in self.fs: + fs.dump(hex_dump) + + def get_info(self, part_name=None): + res = [] + part = None + # only show single partition + if part_name: + part = self.find_partition_by_string(part_name) + if not part: + res.append("Partition not found: %s!" % part_name) + return res + # physical disk info + if part: + logic_blks = self.get_logical_blocks() + res.append(part.get_info(logic_blks)) + extra = part.get_extra_infos() + for e in extra: + res.append("%s%s" % (" " * 70, e)) + else: + pd = self.rdb.phy_drv + total_blks = self.get_total_blocks() + total_bytes = self.get_total_bytes() + bs = self.rdb.block_size + extra = "heads=%d sectors=%d block_size=%d" % (pd.heads, pd.secs, bs) + res.append( + "PhysicalDisk: %8d %8d %10d %s %s" + % ( + 0, + pd.cyls - 1, + total_blks, + ByteSize.to_byte_size_str(total_bytes), + extra, + ) + ) + # logical disk info + ld = self.rdb.log_drv + extra = "rdb_blks=[%d:%d,#%d] used=[hi=%d,#%d] cyl_blks=%d" % ( + ld.rdb_blk_lo, + ld.rdb_blk_hi, + self.max_blks, + ld.high_rdsk_blk, + len(self.used_blks), + ld.cyl_blks, + ) + logic_blks = self.get_logical_blocks() + logic_bytes = self.get_logical_bytes() + res.append( + "LogicalDisk: %8d %8d %10d %s %s" + % ( + ld.lo_cyl, + ld.hi_cyl, + logic_blks, + ByteSize.to_byte_size_str(logic_bytes), + extra, + ) + ) + # add partitions + for p in self.parts: + res.append(p.get_info(logic_blks)) + extra = p.get_extra_infos() + for e in extra: + res.append("%s%s" % (" " * 70, e)) + # add fileystems + for f in self.fs: + res.append(f.get_info()) + return res + + def get_block_map(self): + res = [] + for i in range(self.max_blks): + blk = None + # check partitions + if i == 0: + blk = "RD" + else: + for p in self.parts: + if i == p.get_blk_num(): + blk = "P%d" % p.num + break + if blk == None: + # check file systems + for f 
in self.fs: + if i in f.get_blk_nums(): + blk = "F%d" % f.num + break + if blk == None: + blk = "--" + res.append(blk) + return res + + def get_logical_cylinders(self): + ld = self.rdb.log_drv + return ld.hi_cyl - ld.lo_cyl + 1 + + def get_logical_blocks(self): + ld = self.rdb.log_drv + cyls = ld.hi_cyl - ld.lo_cyl + 1 + return cyls * ld.cyl_blks + + def get_logical_bytes(self): + return self.get_logical_blocks() * self.block_bytes + + def get_total_blocks(self): + pd = self.rdb.phy_drv + return pd.cyls * pd.heads * pd.secs + + def get_total_bytes(self): + return self.get_total_blocks() * self.block_bytes + + def get_cylinder_blocks(self): + ld = self.rdb.log_drv + return ld.cyl_blks + + def get_cylinder_bytes(self): + return self.get_cylinder_blocks() * self.block_bytes + + def get_num_partitions(self): + return len(self.parts) + + def get_partition(self, num): + if num < len(self.parts): + return self.parts[num] + else: + return None + + def find_partition_by_drive_name(self, name): + lo_name = name.lower() + num = 0 + for p in self.parts: + drv_name = p.get_drive_name().get_unicode().lower() + if drv_name == lo_name: + return p + return None + + def find_partition_by_string(self, s): + p = self.find_partition_by_drive_name(s) + if p != None: + return p + # try partition number + try: + num = int(s) + return self.get_partition(num) + except ValueError: + return None + + def get_filesystem(self, num): + if num < len(self.fs): + return self.fs[num] + else: + return None + + def get_used_blocks(self): + return self.used_blks + + def find_filesystem_by_string(self, s): + # try filesystem number + try: + num = int(s) + return self.get_filesystem(num) + except ValueError: + return None + + # ----- edit ----- + + def create( + self, disk_geo, rdb_cyls=1, hi_rdb_blk=0, disk_names=None, ctrl_names=None + ): + cyls = disk_geo.cyls + heads = disk_geo.heads + secs = disk_geo.secs + cyl_blks = heads * secs + rdb_blk_hi = cyl_blks * rdb_cyls - 1 + + if disk_names != None: + 
disk_vendor = disk_names[0] + disk_product = disk_names[1] + disk_revision = disk_names[2] + else: + disk_vendor = "RDBTOOL" + disk_product = "IMAGE" + disk_revision = "2012" + + if ctrl_names != None: + ctrl_vendor = ctrl_names[0] + ctrl_product = ctrl_names[1] + ctrl_revision = ctrl_names[2] + else: + ctrl_vendor = "" + ctrl_product = "" + ctrl_revision = "" + + flags = 0x7 + if disk_names != None: + flags |= 0x10 + if ctrl_names != None: + flags |= 0x20 + + # create RDB + phy_drv = RDBPhysicalDrive(cyls, heads, secs) + log_drv = RDBLogicalDrive( + rdb_blk_hi=rdb_blk_hi, + lo_cyl=rdb_cyls, + hi_cyl=cyls - 1, + cyl_blks=cyl_blks, + high_rdsk_blk=hi_rdb_blk, + ) + drv_id = RDBDriveID( + disk_vendor, + disk_product, + disk_revision, + ctrl_vendor, + ctrl_product, + ctrl_revision, + ) + self.block_bytes = self.rawblk.block_bytes + self.rdb = RDBlock(self.rawblk) + self.rdb.create( + phy_drv, log_drv, drv_id, block_size=self.block_bytes, flags=flags + ) + self.rdb.write() + + self.used_blks = [self.rdb.blk_num] + self.max_blks = self.rdb.log_drv.rdb_blk_hi + 1 + self.valid = True + + def get_cyl_range(self): + log_drv = self.rdb.log_drv + return (log_drv.lo_cyl, log_drv.hi_cyl) + + def check_cyl_range(self, lo_cyl, hi_cyl): + if lo_cyl > hi_cyl: + return False + (lo, hi) = self.get_cyl_range() + if not (lo_cyl >= lo and hi_cyl <= hi): + return False + # check partitions + for p in self.parts: + (lo, hi) = p.get_cyl_range() + if not ((hi_cyl < lo) or (lo_cyl > hi)): + return False + return True + + def get_free_cyl_ranges(self): + lohi = self.get_cyl_range() + free = [lohi] + for p in self.parts: + pr = p.get_cyl_range() + new_free = [] + for r in free: + # partition completely fills range + if pr[0] == r[0] and pr[1] == r[1]: + pass + # partition starts at range + elif pr[0] == r[0]: + n = (pr[1] + 1, r[1]) + new_free.append(n) + # partition ends at range + elif pr[1] == r[1]: + n = (r[0], pr[0] - 1) + new_free.append(n) + # partition inside range + elif pr[0] > r[0] 
and pr[1] < r[1]: + new_free.append((r[0], pr[0] - 1)) + new_free.append((pr[1] + 1, r[1])) + else: + new_free.append(r) + free = new_free + if len(free) == 0: + return None + return free + + def find_free_cyl_range_start(self, num_cyls): + ranges = self.get_free_cyl_ranges() + if ranges == None: + return None + for r in ranges: + size = r[1] - r[0] + 1 + if num_cyls <= size: + return r[0] + return None + + # ----- manage rdb blocks ----- + + def _has_free_rdb_blocks(self, num): + return (len(self.used_blks) + num) <= self.max_blks + + def get_free_blocks(self): + res = [] + for i in range(self.max_blks): + if i not in self.used_blks: + res.append(i) return res - # physical disk info - if part: - logic_blks = self.get_logical_blocks() - res.append(part.get_info(logic_blks)) - extra = part.get_extra_infos() - for e in extra: - res.append("%s%s" % (" " * 70, e)) - else: - pd = self.rdb.phy_drv - total_blks = self.get_total_blocks() - total_bytes = self.get_total_bytes() - bs = self.rdb.block_size - extra="heads=%d sectors=%d block_size=%d" % (pd.heads, pd.secs, bs) - res.append("PhysicalDisk: %8d %8d %10d %s %s" \ - % (0, pd.cyls-1, total_blks, ByteSize.to_byte_size_str(total_bytes), extra)) - # logical disk info - ld = self.rdb.log_drv - extra="rdb_blks=[%d:%d,#%d] used=[hi=%d,#%d] cyl_blks=%d" % (ld.rdb_blk_lo, ld.rdb_blk_hi, self.max_blks, ld.high_rdsk_blk, len(self.used_blks), ld.cyl_blks) - logic_blks = self.get_logical_blocks() - logic_bytes = self.get_logical_bytes() - res.append("LogicalDisk: %8d %8d %10d %s %s" \ - % (ld.lo_cyl, ld.hi_cyl, logic_blks, ByteSize.to_byte_size_str(logic_bytes), extra)) - # add partitions - for p in self.parts: - res.append(p.get_info(logic_blks)) - extra = p.get_extra_infos() - for e in extra: - res.append("%s%s" % (" " * 70, e)) - # add fileystems - for f in self.fs: - res.append(f.get_info()) - return res - - def get_block_map(self): - res = [] - for i in range(self.max_blks): - blk = None - # check partitions - if i == 0: - 
def _alloc_rdb_blocks(self, num):
    """Allocate num free RDB blocks.

    Returns the list of allocated block numbers, or None if fewer than
    num blocks are free.
    """
    free = self.get_free_blocks()
    if len(free) < num:
        return None
    return free[:num]

def _next_rdb_block(self):
    """Return the next free RDB block number, or None if the RDB is full."""
    free = self.get_free_blocks()
    if free:
        return free[0]
    return None

def _update_hi_blk(self):
    """Recalculate the highest used RDB block number and store it in the
    logical drive descriptor (high_rdsk_blk)."""
    ld = self.rdb.log_drv
    ld.high_rdsk_blk = max(self.used_blks, default=0)

# ----- partition handling -----

def _adjust_dos_env(self, dos_env, more_dos_env):
    """Apply (key, value) overrides from more_dos_env to dos_env.

    Values are coerced to int; keys that dos_env does not have are
    silently ignored.
    """
    if more_dos_env is None:
        return
    for p in more_dos_env:
        key = p[0]
        if hasattr(dos_env, key):
            setattr(dos_env, key, int(p[1]))

def add_partition(
    self,
    drv_name,
    cyl_range,
    dev_flags=0,
    flags=0,
    dos_type=DosType.DOS0,
    boot_pri=0,
    more_dos_env=None,
    fs_block_size=None,
):
    """Create a new partition covering cyl_range=(lo_cyl, hi_cyl).

    Returns True on success, False if the cylinder range is invalid or
    occupied, or if no RDB block is left for the partition block.
    Raises IOError if fs_block_size maps to an unsupported sec_per_blk.
    """
    # cyl range is not free anymore or invalid
    if not self.check_cyl_range(*cyl_range):
        return False
    # no space left for partition block
    if not self._has_free_rdb_blocks(1):
        return False
    # allocate block for partition
    blk_num = self._alloc_rdb_blocks(1)[0]
    self.used_blks.append(blk_num)
    self._update_hi_blk()
    # create a new partition block
    pb = PartitionBlock(self.rawblk, blk_num)
    # setup fs block size (may be multiple sectors)
    if not fs_block_size:
        fs_block_size = self.block_bytes
    sec_per_blk = int(fs_block_size // self.block_bytes)
    if sec_per_blk < 1 or sec_per_blk > 16:
        # fix: message used str + int which raises TypeError instead
        raise IOError("Invalid sec_per_blk: %d" % sec_per_blk)
    # block size in longs
    bsl = self.block_bytes >> 2
    # setup dos env from the drive geometry
    heads = self.rdb.phy_drv.heads
    blk_per_trk = self.rdb.phy_drv.secs
    dos_env = PartitionDosEnv(
        low_cyl=cyl_range[0],
        high_cyl=cyl_range[1],
        surfaces=heads,
        blk_per_trk=blk_per_trk,
        dos_type=dos_type,
        boot_pri=boot_pri,
        block_size=bsl,
        sec_per_blk=sec_per_blk,
    )
    self._adjust_dos_env(dos_env, more_dos_env)
    pb.create(drv_name, dos_env, flags=flags)
    pb.write()
    # link block into the partition list
    if len(self.parts) == 0:
        # write into RDB
        self.rdb.part_list = blk_num
    else:
        # write into last partition block
        last_pb = self.parts[-1]
        last_pb.part_blk.next = blk_num
        last_pb.write()
    # always write RDB as allocated block is stored there, too
    self.rdb.write()
    # flush out all changes before we read again
    self.rawblk.flush()
    # create partition object and add to partition list
    blk_per_cyl = blk_per_trk * heads
    p = Partition(self.rawblk, blk_num, len(self.parts), blk_per_cyl, self)
    p.read()
    self.parts.append(p)
    return True

def change_partition(
    self,
    pid,
    drv_name=None,
    dev_flags=None,
    dos_type=None,
    flags=None,
    boot_pri=None,
    more_dos_env=None,
    fs_block_size=None,
):
    """Modify fields of partition pid; only non-None arguments are applied.

    Returns True on success, False if pid is out of range or the
    partition block is missing. Raises IOError on a bad fs_block_size.
    """
    # partition not found
    if pid < 0 or pid >= len(self.parts):
        return False
    p = self.parts[pid]
    pb = p.part_blk
    if pb is None:
        return False
    # set partition block fields
    dirty = False
    if flags is not None:
        pb.flags = flags
        dirty = True
    if drv_name is not None:
        pb.drv_name = drv_name
        dirty = True
    if dev_flags is not None:
        pb.dev_flags = dev_flags
        dirty = True
    # set dosenv fields
    if dos_type is not None:
        pb.dos_env.dos_type = dos_type
        dirty = True
    if boot_pri is not None:
        pb.dos_env.boot_pri = boot_pri
        dirty = True
    # change fs block size
    if fs_block_size:
        sec_per_blk = int(fs_block_size // self.block_bytes)
        if sec_per_blk < 1 or sec_per_blk > 16:
            # fix: message used str + int which raises TypeError instead
            raise IOError("Invalid sec_per_blk: %d" % sec_per_blk)
        # fix: was 'ph.dos_env...' (NameError) and never marked dirty,
        # so the new block size was never written to disk
        pb.dos_env.sec_per_blk = sec_per_blk
        dirty = True
    # update dos env
    if more_dos_env is not None:
        self._adjust_dos_env(pb.dos_env, more_dos_env)
        dirty = True
    # write change
    if dirty:
        pb.write()
    return True
def delete_partition(self, pid):
    """Remove partition pid from the RDB.

    Unlinks the partition block from the on-disk chain, frees its RDB
    block, and renumbers the remaining partitions.
    Returns True on success, False if pid is out of range.
    """
    # partition not found
    if pid < 0 or pid >= len(self.parts):
        return False
    # unlink partition block from the on-disk list
    next = Block.no_blk
    if (pid + 1) < len(self.parts):
        next = self.parts[pid + 1].get_blk_num()
    if pid == 0:
        self.rdb.part_list = next
    else:
        # fix: link the *predecessor* (was self.parts[-1], which corrupted
        # the chain when deleting a partition that is not the last one)
        last_pb = self.parts[pid - 1]
        last_pb.part_blk.next = next
        last_pb.write()
    # free block
    p = self.parts[pid]
    blk_num = p.get_blk_num()
    self.used_blks.remove(blk_num)
    self._update_hi_blk()
    # write RDB (block allocation is stored there)
    self.rdb.write()
    # remove partition instance and renumber the rest
    self.parts.remove(p)
    for num, part in enumerate(self.parts):
        part.num = num
    # done!
    return True

# ----- file system handling ------

def add_filesystem(self, data, dos_type=DosType.DOS1, version=0, dev_flags=None):
    """Store a file system image (data bytes) in the RDB.

    Returns True on success, False if the RDB has not enough free
    blocks to hold the file system data.
    """
    # create a file system at the next free block
    blk_num = self._next_rdb_block()
    fs_num = len(self.fs)
    fs = FileSystem(self.rawblk, blk_num, fs_num)
    # get total number of blocks for fs data
    num_blks = fs.get_total_blocks(data)
    # check if RDB has space left
    if not self._has_free_rdb_blocks(num_blks):
        return False
    # allocate blocks (first one is the fs header block itself)
    blks = self._alloc_rdb_blocks(num_blks)
    self.used_blks += blks
    self._update_hi_blk()
    # create file system
    fs.create(blks[1:], data, version, dos_type, dev_flags)
    fs.write()
    # link fs block
    if len(self.fs) == 0:
        # write into RDB
        self.rdb.fs_list = blk_num
    else:
        # write into last fs block
        last_fs = self.fs[-1]
        last_fs.fshd.next = blk_num
        last_fs.write(only_fshd=True)
    # update rdb: allocated blocks and optional link
    self.rdb.write()
    # add fs to list
    self.fs.append(fs)
    return True

def delete_filesystem(self, fid):
    """Remove file system entry fid from the RDB.

    Unlinks the fs header from the on-disk chain, frees its blocks, and
    renumbers the remaining file systems.
    Returns True on success, False if fid is out of range.
    """
    # check filesystem id
    if fid < 0 or fid >= len(self.fs):
        return False
    # unlink fs header block from the on-disk list
    next = Block.no_blk
    if (fid + 1) < len(self.fs):
        next = self.fs[fid + 1].blk_num
    if fid == 0:
        self.rdb.fs_list = next
    else:
        # fix: link the *predecessor* (was self.fs[-1], which corrupted
        # the chain when deleting a non-last file system)
        last_fs = self.fs[fid - 1]
        last_fs.fshd.next = next
        last_fs.write()
    # free all blocks of the file system
    f = self.fs[fid]
    for b in f.get_blk_nums():
        self.used_blks.remove(b)
    self._update_hi_blk()
    # write RDB
    self.rdb.write()
    # remove fs instance and renumber the rest
    self.fs.remove(f)
    for num, fs in enumerate(self.fs):
        fs.num = num
    # done!
    return True
class BitmapScan:
    """Validate the bitmap of a file system."""

    def __init__(self, block_scan, log):
        self.block_scan = block_scan
        self.log = log
        # list of BlockInfo for all bitmap blocks; filled by scan_bitmap()
        self.bm_blocks = None

    def scan_bitmap(self, root):
        """Scan the file system bitmap attached to the given root block."""
        # first check bitmap flag
        if root.bitmap_flag != 0xFFFFFFFF:
            self.log.msg(Log.WARN, "Root bitmap flag not valid (-1)", root.blk_num)
        # now calculate the size of the bitmap
        blkdev = self.block_scan.blkdev
        num_blks = blkdev.num_blocks - blkdev.reserved
        # all longs but one are available for bitmap data
        block_longs = blkdev.block_longs - 1
        self.num_bm_lwords = (num_blks + 31) // 32  # 32 blocks fit in a long word
        self.num_bm_blocks = (self.num_bm_lwords + block_longs - 1) // block_longs
        self.log.msg(
            Log.DEBUG,
            "Total Bitmap DWORDs: %d (block %d)" % (self.num_bm_lwords, block_longs),
        )
        self.log.msg(Log.DEBUG, "Number of Bitmap Blocks: %d" % self.num_bm_blocks)
        # calc the bitmask in the last word
        last_filled_bits = self.num_bm_lwords * 32 - num_blks
        if last_filled_bits == 32:
            self.last_mask = 0xFFFFFFFF
        else:
            self.last_mask = (1 << last_filled_bits) - 1
        self.log.msg(Log.DEBUG, "Last DWORD mask: %08x" % self.last_mask)
        # now scan bitmap blocks and build list of all bitmap blocks
        self.read_bitmap_ptrs_and_blocks(root)
        found_blocks = len(self.bm_blocks)
        self.log.msg(Log.DEBUG, "Found Bitmap Blocks: %d" % found_blocks)
        # check number of blocks
        if found_blocks != self.num_bm_blocks:
            self.log.msg(
                Log.ERROR,
                "Invalid number of Bitmap Blocks: found=%d expected=%d"
                % (found_blocks, self.num_bm_blocks),
                root.blk_num,
            )
        else:
            # check bits in bitmap
            self.check_bits()

    def check_bits(self):
        """Recompute the allocation longwords and compare them with the
        values stored in the bitmap blocks; log mismatches."""
        blkdev = self.block_scan.blkdev
        # first bitmap data block
        cur_pos = 0
        bm_blk = 0
        cur_data = self.bm_blocks[0].bitmap
        blk_size = len(cur_data)
        # loop through all bitmap longwords but the last one
        lw = 0
        blk_num = blkdev.reserved
        max_lw = self.num_bm_lwords - 1
        while lw < max_lw:
            got = struct.unpack_from(">I", cur_data, cur_pos)[0]
            expect = self.calc_lword(blk_num)
            if got != expect:
                self.log.msg(
                    Log.ERROR,
                    "Invalid bitmap allocation (@%d: #%d+%d) blks [%d...%d] got=%08x expect=%08x"
                    % (lw, bm_blk, cur_pos // 4, blk_num, blk_num + 31, got, expect),
                )
            lw += 1
            blk_num += 32
            # fetch next bitmap data block when the current one is consumed
            cur_pos += 4
            if cur_pos == blk_size:
                bm_blk += 1
                cur_data = self.bm_blocks[bm_blk].bitmap
                cur_pos = 0
        # the last long word is only partially valid (see last_mask)
        got = struct.unpack_from(">I", cur_data, cur_pos)[0] & self.last_mask
        expect = self.calc_lword(blk_num) & self.last_mask
        if got != expect:
            self.log.msg(
                Log.ERROR,
                "Invalid bitmap allocation (last) (@%d: #%d+%d) blks [%d...%d] got=%08x expect=%08x"
                % (
                    lw,
                    bm_blk,
                    cur_pos // 4,
                    blk_num,
                    blkdev.num_blocks - 1,
                    got,
                    expect,
                ),
            )

    def calc_lword(self, blk_num):
        """Calculate the expected bitmap longword for blocks
        blk_num .. blk_num+31.

        Bit i is set when the block scan has no info for block blk_num+i
        (presumably a free block — matches the AmigaDOS convention of set
        bits marking free blocks; confirm against BlockScan semantics).
        """
        value = 0
        for i in range(32):
            if not self.block_scan.is_block_available(blk_num + i):
                value |= 1 << i
        return value

    def read_bitmap_ptrs_and_blocks(self, root):
        """Build self.bm_blocks, the list of all bitmap blocks of the volume,
        from the root block pointers and the bitmap extension chain."""
        self.bm_blocks = []
        # scan list embedded in root block
        self.read_bm_list(root.bitmap_ptrs, root.blk_num)
        # now follow bitmap extension blocks
        cur_blk_num = root.blk_num
        bm_ext = root.bitmap_ext_blk
        while bm_ext != 0:
            # check ext block
            if self.block_scan.is_block_available(bm_ext):
                # fix: message referenced undefined name 'bm_block' (NameError);
                # also stop here — continuing would read an unbound 'bi' below
                self.log.msg(
                    Log.ERROR,
                    "Bitmap ext block @%d already used?" % bm_ext,
                    cur_blk_num,
                )
                break
            bi = self.block_scan.read_block(bm_ext, is_bm_ext=True)
            if bi is None:
                self.log.msg(
                    Log.ERROR,
                    "Error reading bitmap ext block @%d" % bm_ext,
                    cur_blk_num,
                )
                break
            self.read_bm_list(bi.bitmap_ptrs, bm_ext)
            cur_blk_num = bm_ext
            bm_ext = bi.next_blk

    def read_bm_list(self, ptrs, blk_num):
        """Read the bitmap blocks referenced by the pointer array ptrs.

        The list ends at the first zero entry; any non-zero entry after
        that is reported as an error.
        """
        list_end = False
        for bm_block in ptrs:
            # still inside the pointer list
            if not list_end:
                if bm_block != 0:
                    # make sure bitmap block was not used already
                    if self.block_scan.is_block_available(bm_block):
                        self.log.msg(
                            Log.ERROR,
                            "Bitmap block @%d already used?" % bm_block,
                            blk_num,
                        )
                    else:
                        # read bitmap block
                        bi = self.block_scan.read_block(bm_block, is_bm=True)
                        if bi is None:
                            self.log.msg(
                                Log.ERROR,
                                "Error reading bitmap block @%d" % bm_block,
                                blk_num,
                            )
                        else:
                            self.bm_blocks.append(bi)
                else:
                    list_end = True
            else:
                # make sure no blocks are referenced beyond the end
                if bm_block != 0:
                    self.log.msg(
                        Log.ERROR,
                        "Referenced bitmap block @%d beyond end of list" % bm_block,
                        blk_num,
                    )
% bm_block, + blk_num, + ) + else: + # read bitmap block + bi = self.block_scan.read_block(bm_block, is_bm=True) + if bi == None: + self.log.msg( + Log.ERROR, + "Error reading bitmap block @%d" % bm_block, + blk_num, + ) + else: + self.bm_blocks.append(bi) + else: + list_end = True + else: + # make sure no blocks are referenced + if bm_block != 0: + self.log.msg( + Log.ERROR, + "Referenced bitmap block @%d beyond end of list" % bm_block, + blk_num, + ) diff --git a/amitools/fs/validate/BlockScan.py b/amitools/fs/validate/BlockScan.py index e93b5952..c831ef3e 100644 --- a/amitools/fs/validate/BlockScan.py +++ b/amitools/fs/validate/BlockScan.py @@ -1,6 +1,3 @@ - - - import time from amitools.fs.block.Block import Block @@ -17,232 +14,256 @@ from amitools.fs.validate.Log import Log + class BlockInfo: - """Store essential info of a block""" - def __init__(self, blk_num): - self.blk_num = blk_num - self.blk_status = BlockScan.BS_UNKNOWN - self.blk_type = BlockScan.BT_UNKNOWN - self.used = False - self.own_key = None - - def __str__(self): - return str(self.__dict__) + """Store essential info of a block""" + + def __init__(self, blk_num): + self.blk_num = blk_num + self.blk_status = BlockScan.BS_UNKNOWN + self.blk_type = BlockScan.BT_UNKNOWN + self.used = False + self.own_key = None + + def __str__(self): + return str(self.__dict__) + class BlockScan: - """Scan a full volume and classify the blocks""" - - # block status - BS_UNKNOWN = 0 # undecided or unchecked - BS_READ_ERROR = 1 # error reading block - BS_INVALID = 2 # not a detected AmigaDOS block - BS_VALID = 3 # is a AmigaDOS block structure but type was not detected - BS_TYPE = 4 # detected block type - NUM_BS = 5 - - # block type - BT_UNKNOWN = 0 - BT_ROOT = 1 - BT_DIR = 2 - BT_FILE_HDR = 3 - BT_FILE_LIST = 4 - BT_FILE_DATA = 5 - BT_BITMAP = 6 - BT_BITMAP_EXT = 7 - BT_COMMENT = 8 - NUM_BT = 9 - - def __init__(self, blkdev, log, dos_type): - self.blkdev = blkdev - self.log = log - self.dos_type = dos_type - - 
def scan_all(self, progress=lambda x: x):
    """Scan all blocks of the given block device
    Return True if there is a chance that a file system will be found there
    """
    # range to scan: everything after the reserved blocks
    first = self.blkdev.reserved
    total = self.blkdev.num_blocks - self.blkdev.reserved
    self.log.msg(Log.DEBUG, "block: checking range: +%d num=%d" % (first, total))

    # classify every block and validate its own-key field
    for blk_num in range(first, first + total):
        bi = self.get_block(blk_num)
        if bi is None or bi.blk_status != self.BS_TYPE:
            continue
        if bi.own_key is not None and bi.own_key != blk_num:
            self.log.msg(
                Log.ERROR,
                "Own key is invalid: %d type: %d" % (bi.own_key, bi.blk_type),
                blk_num,
            )

    # first summary after block scan
    n_err = len(self.map_status[self.BS_READ_ERROR])
    if n_err:
        self.log.msg(Log.ERROR, "%d unreadable error blocks found" % n_err)
    n_valid = len(self.map_status[self.BS_VALID])
    if n_valid:
        self.log.msg(Log.INFO, "%d valid but unknown blocks found" % n_valid)
    n_invalid = len(self.map_status[self.BS_INVALID])
    if n_invalid:
        self.log.msg(Log.INFO, "%d invalid blocks found" % n_invalid)
+ Log.ERROR, "Root block hash table size mismatch", blk_num + ) + eht = self.blkdev.block_longs - 56 + if nht != eht: + self.log.msg( + Log.WARN, + "Root block does not have normal hash size: %d != %d" + % (nht, eht), + blk_num, + ) + # --- user dir block --- + elif blk.is_user_dir_block(): + bi.blk_type = self.BT_DIR + bi.blk_status = self.BS_TYPE + user = UserDirBlock( + self.blkdev, blk_num, DosType.is_longname(self.dos_type) + ) + user.set(data) + bi.name = user.name + bi.parent_blk = user.parent + bi.next_blk = user.hash_chain + bi.hash_table = user.hash_table + bi.own_key = user.own_key + self.log.msg(Log.DEBUG, "Found Dir : '%s'" % bi.name, blk_num) + # --- filter header block --- + elif blk.is_file_header_block(): + bi.blk_type = self.BT_FILE_HDR + bi.blk_status = self.BS_TYPE + fh = FileHeaderBlock( + self.blkdev, blk_num, DosType.is_longname(self.dos_type) + ) + fh.set(data) + bi.name = fh.name + bi.parent_blk = fh.parent + bi.next_blk = fh.hash_chain + bi.own_key = fh.own_key + bi.byte_size = fh.byte_size + bi.data_blocks = fh.data_blocks + bi.extension = fh.extension + self.log.msg(Log.DEBUG, "Found File: '%s'" % bi.name, blk_num) + # --- file list block --- + elif blk.is_file_list_block(): + bi.blk_type = self.BT_FILE_LIST + fl = FileListBlock(self.blkdev, blk_num) + fl.set(data) + bi.blk_status = self.BS_TYPE + bi.ext_blk = fl.extension + bi.blk_list = fl.data_blocks + bi.own_key = fl.own_key + bi.data_blocks = fl.data_blocks + bi.extension = fl.extension + bi.parent_blk = fl.parent + # --- file data block (OFS) --- + elif blk.is_file_data_block(): + bi.blk_type = self.BT_FILE_DATA + bi.blk_status = self.BS_TYPE + fd = FileDataBlock(self.blkdev, blk_num) + fd.set(data) + bi.data_size = fd.data_size + bi.hdr_key = fd.hdr_key + bi.seq_num = fd.seq_num + elif blk.is_comment_block(): + bi.blk_type = self.BT_COMMENT + bi.blk_status = self.BS_TYPE + cblk = CommentBlock(self.blkdev, blk_num) + bi.hdr_key = cblk.hdr_key + bi.own_key = cblk.own_key + + except 
IOError as e: + self.log.msg(Log.ERROR, "Can't read block", blk_num) + bi = BlockInfo(blk_num) + bi.blk_status = BS_READ_ERROR + + # sort block info into map and arrays assigned by status/type + self.block_map[blk_num] = bi + self.map_status[bi.blk_status].append(bi) + self.map_type[bi.blk_type].append(bi) + return bi + + def any_chance_of_fs(self): + """is there any chance to find a FS on this block device?""" + num_dirs = len(self.map_type[self.BT_DIR]) + num_files = len(self.map_type[self.BT_FILE_HDR]) + num_roots = len(self.map_type[self.BT_ROOT]) + return (num_files > 0) or ((num_roots + num_dirs) > 0) + + def get_blocks_of_type(self, t): + return self.map_type[t] + + def get_blocks_with_key_value(self, key, value): + res = [] + for bi in self.block_map: + if hasattr(bi, key): + v = getattr(bi, key) + if v == value: + res.append(bi) + return res + + def is_block_available(self, num): + if num >= 0 and num < len(self.block_map): + return self.block_map[num] != None + else: + return False + + def get_block(self, num): + if num >= 0 and num < len(self.block_map): + bi = self.block_map[num] + if bi == None: + bi = self.read_block(num) + return bi + else: + return None + def dump(self): + for b in self.block_map: + if b != None: + print(b) diff --git a/amitools/fs/validate/DirScan.py b/amitools/fs/validate/DirScan.py index d77944fe..72844dfd 100644 --- a/amitools/fs/validate/DirScan.py +++ b/amitools/fs/validate/DirScan.py @@ -1,237 +1,286 @@ - - - from .BlockScan import BlockScan from amitools.fs.FSString import FSString from amitools.fs.FileName import FileName from amitools.fs.validate.Log import Log import amitools.fs.DosType as DosType + class DirChainEntry: - """entry of the hash chain""" - def __init__(self, blk_info): - self.blk_info = blk_info - self.parent_ok = False - self.fn_hash_ok = False - self.valid = False - self.end = False - self.orphaned = False - self.sub = None - - def __str__(self): - l = [] - if self.parent_ok: - l.append("parent_ok") - if 
self.fn_hash_ok: - l.append("fn_hash_ok") - if self.valid: - l.append("valid") - if self.end: - l.append("end") - if self.orphaned: - l.append("orphaned") - return "[DCE @%d '%s': %s]" % \ - (self.blk_info.blk_num, self.blk_info.name, " ".join(l)) + """entry of the hash chain""" + + def __init__(self, blk_info): + self.blk_info = blk_info + self.parent_ok = False + self.fn_hash_ok = False + self.valid = False + self.end = False + self.orphaned = False + self.sub = None + + def __str__(self): + l = [] + if self.parent_ok: + l.append("parent_ok") + if self.fn_hash_ok: + l.append("fn_hash_ok") + if self.valid: + l.append("valid") + if self.end: + l.append("end") + if self.orphaned: + l.append("orphaned") + return "[DCE @%d '%s': %s]" % ( + self.blk_info.blk_num, + self.blk_info.name, + " ".join(l), + ) class DirChain: - """representing a chain of the hashtable in a directory""" - def __init__(self, hash_val): - self.hash_val = hash_val - self.chain = [] - - def add(self, dce): - self.chain.append(dce) - - def get_entries(self): - return self.chain - - def __str__(self): - return "{DirChain +%d: #%d}" % (self.hash_val, len(self.chain)) - + """representing a chain of the hashtable in a directory""" + + def __init__(self, hash_val): + self.hash_val = hash_val + self.chain = [] + + def add(self, dce): + self.chain.append(dce) + + def get_entries(self): + return self.chain + + def __str__(self): + return "{DirChain +%d: #%d}" % (self.hash_val, len(self.chain)) + class DirInfo: - """information structure on a directory""" - def __init__(self, blk_info): - self.blk_info = blk_info - self.chains = {} - self.children = [] - - def add(self, dc): - self.chains[dc.hash_val] = dc - - def add_child(self, c): - self.children.append(c) - - def get(self, hash_val): - if hash_val in self.chains: - return self.chains[hash_val] - else: - return None - - def get_chains(self): - return self.chains - - def __str__(self): - bi = self.blk_info - blk_num = bi.blk_num - name = bi.name - 
parent_blk = bi.parent_blk - return "" % (blk_num, name, len(self.chains), parent_blk, len(self.children)) + """information structure on a directory""" + + def __init__(self, blk_info): + self.blk_info = blk_info + self.chains = {} + self.children = [] + + def add(self, dc): + self.chains[dc.hash_val] = dc + + def add_child(self, c): + self.children.append(c) + + def get(self, hash_val): + if hash_val in self.chains: + return self.chains[hash_val] + else: + return None + + def get_chains(self): + return self.chains + + def __str__(self): + bi = self.blk_info + blk_num = bi.blk_num + name = bi.name + parent_blk = bi.parent_blk + return "" % ( + blk_num, + name, + len(self.chains), + parent_blk, + len(self.children), + ) class DirScan: - """directory tree scanner""" - - def __init__(self, block_scan, log): - self.log = log - self.block_scan = block_scan - self.root_di = None - self.intl = DosType.is_intl(block_scan.dos_type) - self.files = [] - self.dirs = [] - - def scan_tree(self, root_blk_num, progress=None): - """scan the root tree""" - # get root block info - root_bi = self.block_scan.get_block(root_blk_num) - if root_bi == None: - self.log.msg(Log.ERROR,"Root block not found?!",root_blk_num) - return None - # do tree scan - if progress != None: - progress.begin("dir") - self.root_di = self.scan_dir(root_bi, progress) - if progress != None: - progress.end() - return self.root_di - - def scan_dir(self, dir_bi, progress): - """check a directory by scanning through the hash table entries and follow the chains + """directory tree scanner""" + + def __init__(self, block_scan, log): + self.log = log + self.block_scan = block_scan + self.root_di = None + self.intl = DosType.is_intl(block_scan.dos_type) + self.files = [] + self.dirs = [] + + def scan_tree(self, root_blk_num, progress=None): + """scan the root tree""" + # get root block info + root_bi = self.block_scan.get_block(root_blk_num) + if root_bi == None: + self.log.msg(Log.ERROR, "Root block not found?!", 
root_blk_num) + return None + # do tree scan + if progress != None: + progress.begin("dir") + self.root_di = self.scan_dir(root_bi, progress) + if progress != None: + progress.end() + return self.root_di + + def scan_dir(self, dir_bi, progress): + """check a directory by scanning through the hash table entries and follow the chains Returns (all_chains_ok, dir_obj) """ - # create new dir info - di = DirInfo(dir_bi) - self.dirs.append(di) - - # run through hash_table of directory and build chains - chains = {} - hash_val = 0 - for blk_num in dir_bi.hash_table: - if blk_num != 0: - # build chain - chain = DirChain(hash_val) - self.build_chain(chain, dir_bi, blk_num, progress) - di.add(chain) - hash_val += 1 - - return di - - def build_chain(self, chain, dir_blk_info, blk_num, progress): - """build a block chain""" - dir_blk_num = dir_blk_info.blk_num - dir_name = dir_blk_info.name - hash_val = chain.hash_val - - # make sure entry block is first used - block_used = self.block_scan.is_block_available(blk_num) - - # get entry block - blk_info = self.block_scan.read_block(blk_num) - - # create dir chain entry - dce = DirChainEntry(blk_info) - chain.add(dce) - - # account - if progress != None: - progress.add() - - # block already used? - if block_used: - self.log.msg(Log.ERROR, "dir block already used in chain #%d of dir '%s (%d)" % (hash_val, dir_name, dir_blk_num), blk_num) - dce.end = True - return - - # self reference? 
- if blk_num == dir_blk_num: - self.log.msg(Log.ERROR, "dir block in its own chain #%d of dir '%s' (%d)" % (hash_val, dir_name, dir_blk_num), blk_num) - dce.end = True - return - - # not a block in range - if blk_info == None: - self.log.msg(Log.ERROR, "out-of-range block terminates chain #%d of dir '%s' (%d)" % (hash_val, dir_name, dir_blk_num), blk_num) - dce.end = True - return - - # check type of entry block - b_type = blk_info.blk_type - if b_type not in (BlockScan.BT_DIR, BlockScan.BT_FILE_HDR): - self.log.msg(Log.ERROR, "invalid block terminates chain #%d of dir '%s' (%d)" % (hash_val, dir_name, dir_blk_num), blk_num) - dce.end = True - return - - # check referenceed block type in chain - blk_type = blk_info.blk_type - if blk_type in (BlockScan.BT_ROOT, BlockScan.BT_FILE_LIST, BlockScan.BT_FILE_DATA): - self.log.msg(Log.ERROR, "invalid block type %d terminates chain #%d of dir '%s' (%d)" % (blk_type, hash_val, dir_name, dir_blk_num), blk_num) - dce.end = True - return - - # all following are ok - dce.valid = True - - # check parent of block - name = blk_info.name - dce.parent_ok = (blk_info.parent_blk == dir_blk_num) - if not dce.parent_ok: - self.log.msg(Log.ERROR, "invalid parent in '%s' chain #%d of dir '%s' (%d)" % (name, hash_val, dir_name, dir_blk_num), blk_num) - - # check name hash - fn = FileName(name, self.intl) - fn_hash = fn.hash() - dce.fn_hash_ok = (fn_hash == hash_val) - if not dce.fn_hash_ok: - self.log.msg(Log.ERROR, "invalid name hash in '%s' chain #%d of dir '%s' (%d)" % (name, hash_val, dir_name, dir_blk_num), blk_num) - - # recurse into dir? 
- if blk_type == BlockScan.BT_DIR: - dce.sub = self.scan_dir(blk_info, progress) - elif blk_type == BlockScan.BT_FILE_HDR: - self.files.append(dce) - - # check next block in chain - next_blk = blk_info.next_blk - if next_blk != 0: - self.build_chain(chain, dir_blk_info, next_blk, progress) - else: - dce.end = True - - def get_all_file_hdr_blk_infos(self): - """return all file chain entries""" - result = [] - for f in self.files: - result.append(f.blk_info) - return result - - def get_all_dir_infos(self): - """return all dir infos""" - return self.dirs - - def dump(self): - """dump whole dir info structure""" - self.dump_dir_info(self.root_di, 0) - - def dump_dir_info(self, di, indent): - """dump a single dir info structure and its sub dirs""" - istr = " " * indent - print(istr, di) - for hash_value in sorted(di.get_chains().keys()): - dc = di.get(hash_value) - print(istr, " ", dc) - for dce in dc.get_entries(): - print(istr, " ", dce) - sub = dce.sub - if sub != None and dce.blk_info.blk_type == BlockScan.BT_DIR: - self.dump_dir_info(sub, indent+1) + # create new dir info + di = DirInfo(dir_bi) + self.dirs.append(di) + + # run through hash_table of directory and build chains + chains = {} + hash_val = 0 + for blk_num in dir_bi.hash_table: + if blk_num != 0: + # build chain + chain = DirChain(hash_val) + self.build_chain(chain, dir_bi, blk_num, progress) + di.add(chain) + hash_val += 1 + + return di + + def build_chain(self, chain, dir_blk_info, blk_num, progress): + """build a block chain""" + dir_blk_num = dir_blk_info.blk_num + dir_name = dir_blk_info.name + hash_val = chain.hash_val + + # make sure entry block is first used + block_used = self.block_scan.is_block_available(blk_num) + + # get entry block + blk_info = self.block_scan.read_block(blk_num) + + # create dir chain entry + dce = DirChainEntry(blk_info) + chain.add(dce) + + # account + if progress != None: + progress.add() + + # block already used? 
+ if block_used: + self.log.msg( + Log.ERROR, + "dir block already used in chain #%d of dir '%s (%d)" + % (hash_val, dir_name, dir_blk_num), + blk_num, + ) + dce.end = True + return + + # self reference? + if blk_num == dir_blk_num: + self.log.msg( + Log.ERROR, + "dir block in its own chain #%d of dir '%s' (%d)" + % (hash_val, dir_name, dir_blk_num), + blk_num, + ) + dce.end = True + return + + # not a block in range + if blk_info == None: + self.log.msg( + Log.ERROR, + "out-of-range block terminates chain #%d of dir '%s' (%d)" + % (hash_val, dir_name, dir_blk_num), + blk_num, + ) + dce.end = True + return + + # check type of entry block + b_type = blk_info.blk_type + if b_type not in (BlockScan.BT_DIR, BlockScan.BT_FILE_HDR): + self.log.msg( + Log.ERROR, + "invalid block terminates chain #%d of dir '%s' (%d)" + % (hash_val, dir_name, dir_blk_num), + blk_num, + ) + dce.end = True + return + + # check referenceed block type in chain + blk_type = blk_info.blk_type + if blk_type in ( + BlockScan.BT_ROOT, + BlockScan.BT_FILE_LIST, + BlockScan.BT_FILE_DATA, + ): + self.log.msg( + Log.ERROR, + "invalid block type %d terminates chain #%d of dir '%s' (%d)" + % (blk_type, hash_val, dir_name, dir_blk_num), + blk_num, + ) + dce.end = True + return + + # all following are ok + dce.valid = True + + # check parent of block + name = blk_info.name + dce.parent_ok = blk_info.parent_blk == dir_blk_num + if not dce.parent_ok: + self.log.msg( + Log.ERROR, + "invalid parent in '%s' chain #%d of dir '%s' (%d)" + % (name, hash_val, dir_name, dir_blk_num), + blk_num, + ) + + # check name hash + fn = FileName(name, self.intl) + fn_hash = fn.hash() + dce.fn_hash_ok = fn_hash == hash_val + if not dce.fn_hash_ok: + self.log.msg( + Log.ERROR, + "invalid name hash in '%s' chain #%d of dir '%s' (%d)" + % (name, hash_val, dir_name, dir_blk_num), + blk_num, + ) + + # recurse into dir? 
+ if blk_type == BlockScan.BT_DIR: + dce.sub = self.scan_dir(blk_info, progress) + elif blk_type == BlockScan.BT_FILE_HDR: + self.files.append(dce) + + # check next block in chain + next_blk = blk_info.next_blk + if next_blk != 0: + self.build_chain(chain, dir_blk_info, next_blk, progress) + else: + dce.end = True + + def get_all_file_hdr_blk_infos(self): + """return all file chain entries""" + result = [] + for f in self.files: + result.append(f.blk_info) + return result + + def get_all_dir_infos(self): + """return all dir infos""" + return self.dirs + + def dump(self): + """dump whole dir info structure""" + self.dump_dir_info(self.root_di, 0) + + def dump_dir_info(self, di, indent): + """dump a single dir info structure and its sub dirs""" + istr = " " * indent + print(istr, di) + for hash_value in sorted(di.get_chains().keys()): + dc = di.get(hash_value) + print(istr, " ", dc) + for dce in dc.get_entries(): + print(istr, " ", dce) + sub = dce.sub + if sub != None and dce.blk_info.blk_type == BlockScan.BT_DIR: + self.dump_dir_info(sub, indent + 1) diff --git a/amitools/fs/validate/FileScan.py b/amitools/fs/validate/FileScan.py index 7a5989ba..6d4a3935 100644 --- a/amitools/fs/validate/FileScan.py +++ b/amitools/fs/validate/FileScan.py @@ -1,124 +1,168 @@ - - - from .BlockScan import BlockScan from amitools.fs.FSString import FSString from amitools.fs.FileName import FileName from amitools.fs.validate.Log import Log import amitools.fs.DosType as DosType + class FileData: - def __init__(self, bi): - self.bi = bi + def __init__(self, bi): + self.bi = bi + class FileInfo: - def __init__(self, bi): - self.bi = bi + def __init__(self, bi): + self.bi = bi + class FileScan: - def __init__(self, block_scan, log, dos_type): - self.block_scan = block_scan - self.log = log - self.dos_type = dos_type - self.ffs = DosType.is_ffs(self.dos_type) - self.infos = [] - - def scan_all_files(self, all_file_hdr_block_infos, progress=None): - """scan through all files""" - if progress 
!= None: - progress.begin("file") - for bi in all_file_hdr_block_infos: - self.scan_file(bi) - progress.add() - progress.end() - else: - for bi in all_file_hdr_block_infos: - self.scan_file(bi) - - def scan_file(self, bi): - """scan a file header block info and create a FileInfo instance""" - fi = FileInfo(bi) - self.infos.append(fi) - - info = "'%s' (@%d)" % (bi.name, bi.blk_num) - - # scan for data blocks - linked_data_blocks = bi.data_blocks - blk_num = bi.blk_num - # run through file list blocks linked by extension - sbi = bi - aborted = False - num = 0 - while sbi.extension != 0 and sbi.extension < self.block_scan.blkdev.num_blocks: - # check usage of block - if self.block_scan.is_block_available(sbi.extension): - self.log.msg(Log.ERROR, "File ext block #%d of %s already used" % (num, info), sbi.extension) - aborted = True - break - # get block - ebi = self.block_scan.get_block(sbi.extension) - if ebi == None: - aborted = True - break - # check block type - if ebi.blk_type != BlockScan.BT_FILE_LIST: - self.log.msg(Log.ERROR, "File ext block #%d of %s is no ext block" % (num, info), ebi.blk_num) - aborted = True - break - # check for parent link - if ebi.parent_blk != blk_num: - self.log.msg(Log.ERROR, "File ext block #%d of %s has invalid parent: got %d != expect %d" % (num, info, ebi.parent_blk, blk_num), ebi.blk_num) - # warn if data blocks is not full - ndb = len(ebi.data_blocks) - if ebi.extension != 0 and ndb != self.block_scan.blkdev.block_longs - 56: - self.log.msg(Log.WARN, "File ext block #%d of %s has incomplete data refs: got %d" % (num, info, ndb), ebi.blk_num) - # add data blocks - linked_data_blocks += ebi.data_blocks - sbi = ebi - num += 1 - - # transform the data block numbers to file data - file_datas = [] - seq_num = 1 - for data_blk in linked_data_blocks: - # get block - block_used = self.block_scan.is_block_available(data_blk) - dbi = self.block_scan.get_block(data_blk) - fd = FileData(dbi) - file_datas.append(fd) - # check usage of block - 
# is block available - if dbi == None: - self.log.msg(Log.ERROR, "File data block #%d of %s not found" % (seq_num,info), data_blk) - if block_used: - self.log.msg(Log.ERROR, "File data block #%d of %s already used" % (seq_num,info), data_blk) - fd.bi = None - # in ofs check data blocks - if not self.ffs: - # check block type - if dbi.blk_type != BlockScan.BT_FILE_DATA: - self.log.msg(Log.ERROR, "File data block #%d of %s is no data block" % (seq_num,info), data_blk) - fd.bi = None + def __init__(self, block_scan, log, dos_type): + self.block_scan = block_scan + self.log = log + self.dos_type = dos_type + self.ffs = DosType.is_ffs(self.dos_type) + self.infos = [] + + def scan_all_files(self, all_file_hdr_block_infos, progress=None): + """scan through all files""" + if progress != None: + progress.begin("file") + for bi in all_file_hdr_block_infos: + self.scan_file(bi) + progress.add() + progress.end() else: - # check header ref: must point to file header - if dbi.hdr_key != blk_num: - self.log.msg(Log.ERROR, "File data block #%d of %s does not ref header: got %d != expect %d" % (seq_num,info,dbi.hdr_key, blk_num), data_blk) - # check sequence number - if dbi.seq_num != seq_num: - self.log.msg(Log.ERROR, "File data block #%d of %s seq num mismatch: got %d" % (seq_num,info,dbi.seq_num), data_blk) - seq_num += 1 - - # check size of file in bytes - block_data_bytes = self.block_scan.blkdev.block_bytes - if not self.ffs: - block_data_bytes -= 24 - file_est_blocks = (bi.byte_size + block_data_bytes - 1) // block_data_bytes - num_data_blocks = len(linked_data_blocks) - if file_est_blocks != num_data_blocks: - self.log.msg(Log.ERROR, "File %s with %d bytes has wrong number of data blocks: got %d != expect %d" % (info, bi.byte_size, num_data_blocks, file_est_blocks), bi.blk_num) - - return fi - - def dump(self): - pass - \ No newline at end of file + for bi in all_file_hdr_block_infos: + self.scan_file(bi) + + def scan_file(self, bi): + """scan a file header block info and 
create a FileInfo instance""" + fi = FileInfo(bi) + self.infos.append(fi) + + info = "'%s' (@%d)" % (bi.name, bi.blk_num) + + # scan for data blocks + linked_data_blocks = bi.data_blocks + blk_num = bi.blk_num + # run through file list blocks linked by extension + sbi = bi + aborted = False + num = 0 + while sbi.extension != 0 and sbi.extension < self.block_scan.blkdev.num_blocks: + # check usage of block + if self.block_scan.is_block_available(sbi.extension): + self.log.msg( + Log.ERROR, + "File ext block #%d of %s already used" % (num, info), + sbi.extension, + ) + aborted = True + break + # get block + ebi = self.block_scan.get_block(sbi.extension) + if ebi == None: + aborted = True + break + # check block type + if ebi.blk_type != BlockScan.BT_FILE_LIST: + self.log.msg( + Log.ERROR, + "File ext block #%d of %s is no ext block" % (num, info), + ebi.blk_num, + ) + aborted = True + break + # check for parent link + if ebi.parent_blk != blk_num: + self.log.msg( + Log.ERROR, + "File ext block #%d of %s has invalid parent: got %d != expect %d" + % (num, info, ebi.parent_blk, blk_num), + ebi.blk_num, + ) + # warn if data blocks is not full + ndb = len(ebi.data_blocks) + if ebi.extension != 0 and ndb != self.block_scan.blkdev.block_longs - 56: + self.log.msg( + Log.WARN, + "File ext block #%d of %s has incomplete data refs: got %d" + % (num, info, ndb), + ebi.blk_num, + ) + # add data blocks + linked_data_blocks += ebi.data_blocks + sbi = ebi + num += 1 + + # transform the data block numbers to file data + file_datas = [] + seq_num = 1 + for data_blk in linked_data_blocks: + # get block + block_used = self.block_scan.is_block_available(data_blk) + dbi = self.block_scan.get_block(data_blk) + fd = FileData(dbi) + file_datas.append(fd) + # check usage of block + # is block available + if dbi == None: + self.log.msg( + Log.ERROR, + "File data block #%d of %s not found" % (seq_num, info), + data_blk, + ) + if block_used: + self.log.msg( + Log.ERROR, + "File data block #%d 
of %s already used" % (seq_num, info), + data_blk, + ) + fd.bi = None + # in ofs check data blocks + if not self.ffs: + # check block type + if dbi.blk_type != BlockScan.BT_FILE_DATA: + self.log.msg( + Log.ERROR, + "File data block #%d of %s is no data block" % (seq_num, info), + data_blk, + ) + fd.bi = None + else: + # check header ref: must point to file header + if dbi.hdr_key != blk_num: + self.log.msg( + Log.ERROR, + "File data block #%d of %s does not ref header: got %d != expect %d" + % (seq_num, info, dbi.hdr_key, blk_num), + data_blk, + ) + # check sequence number + if dbi.seq_num != seq_num: + self.log.msg( + Log.ERROR, + "File data block #%d of %s seq num mismatch: got %d" + % (seq_num, info, dbi.seq_num), + data_blk, + ) + seq_num += 1 + + # check size of file in bytes + block_data_bytes = self.block_scan.blkdev.block_bytes + if not self.ffs: + block_data_bytes -= 24 + file_est_blocks = (bi.byte_size + block_data_bytes - 1) // block_data_bytes + num_data_blocks = len(linked_data_blocks) + if file_est_blocks != num_data_blocks: + self.log.msg( + Log.ERROR, + "File %s with %d bytes has wrong number of data blocks: got %d != expect %d" + % (info, bi.byte_size, num_data_blocks, file_est_blocks), + bi.blk_num, + ) + + return fi + + def dump(self): + pass diff --git a/amitools/fs/validate/Log.py b/amitools/fs/validate/Log.py index fa47958c..08bb9a8e 100644 --- a/amitools/fs/validate/Log.py +++ b/amitools/fs/validate/Log.py @@ -1,45 +1,45 @@ +class LogEntry: + """A class for a log entry""" + names = ("debug", "info ", "WARN ", "ERROR") + def __init__(self, level, msg, blk_num=-1): + self.blk_num = blk_num + self.level = level + self.msg = msg + + def __str__(self): + if self.blk_num == -1: + return "%s%s:%s" % (" " * 8, self.names[self.level], self.msg) + else: + return "@%06d:%s:%s" % (self.blk_num, self.names[self.level], self.msg) -class LogEntry: - """A class for a log entry""" - names = ('debug','info ','WARN ','ERROR') - - def __init__(self, level, msg, 
blk_num=-1): - self.blk_num = blk_num - self.level = level - self.msg = msg - def __str__(self): - if self.blk_num == -1: - return "%s%s:%s" % (" "*8, self.names[self.level], self.msg) - else: - return "@%06d:%s:%s" % (self.blk_num, self.names[self.level], self.msg) class Log: - """Store a log of entries""" - - DEBUG = 0 - INFO = 1 - WARN = 2 - ERROR = 3 - - def __init__(self, min_level): - self.entries = [] - self.min_level = min_level - - def msg(self, level, msg, blk_num = -1): - if level < self.min_level: - return - e = LogEntry(level, msg, blk_num) - self.entries.append(e) - - def dump(self): - for e in self.entries: - print(e) - - def get_num_level(self, level): - num = 0 - for e in self.entries: - if e.level == level: - num += 1 - return num + """Store a log of entries""" + + DEBUG = 0 + INFO = 1 + WARN = 2 + ERROR = 3 + + def __init__(self, min_level): + self.entries = [] + self.min_level = min_level + + def msg(self, level, msg, blk_num=-1): + if level < self.min_level: + return + e = LogEntry(level, msg, blk_num) + self.entries.append(e) + + def dump(self): + for e in self.entries: + print(e) + + def get_num_level(self, level): + num = 0 + for e in self.entries: + if e.level == level: + num += 1 + return num diff --git a/amitools/fs/validate/Progress.py b/amitools/fs/validate/Progress.py index e7218f44..ea567fd8 100644 --- a/amitools/fs/validate/Progress.py +++ b/amitools/fs/validate/Progress.py @@ -1,16 +1,17 @@ +import sys +class Progress: + def __init__(self): + self.num = 0 + self.msg = None -import sys + def begin(self, msg): + self.num = 0 + self.msg = msg -class Progress: - def __init__(self): - self.num = 0 - self.msg = None - def begin(self, msg): - self.num = 0 - self.msg = msg - def end(self): - pass - def add(self): - self.num += 1 \ No newline at end of file + def end(self): + pass + + def add(self): + self.num += 1 diff --git a/amitools/fs/validate/Validator.py b/amitools/fs/validate/Validator.py index a151c6ea..0c76e9b5 100644 --- 
a/amitools/fs/validate/Validator.py +++ b/amitools/fs/validate/Validator.py @@ -1,6 +1,3 @@ - - - from amitools.fs.block.BootBlock import BootBlock from amitools.fs.block.RootBlock import RootBlock @@ -11,99 +8,118 @@ from amitools.fs.validate.BitmapScan import BitmapScan import amitools.fs.DosType as DosType + class Validator: - """Validate an AmigaDOS file system""" - - def __init__(self, blkdev, min_level, debug=False, progress=None): - self.log = Log(min_level) - self.debug = debug - self.blkdev = blkdev - self.dos_type = None - self.boot = None - self.root = None - self.block_scan = None - self.progress = progress + """Validate an AmigaDOS file system""" + + def __init__(self, blkdev, min_level, debug=False, progress=None): + self.log = Log(min_level) + self.debug = debug + self.blkdev = blkdev + self.dos_type = None + self.boot = None + self.root = None + self.block_scan = None + self.progress = progress - def scan_boot(self): - """Step 1: scan boot block. + def scan_boot(self): + """Step 1: scan boot block. Returns (True, x) if boot block has a valid dos type. Returns (x, True) if boot block is bootable Invalid checksum of the block is tolerated but remarked. """ - # check boot block - boot = BootBlock(self.blkdev) - boot.read() - if boot.valid: - # dos type is valid - self.boot = boot - self.dos_type = boot.dos_type - # give a warning if checksum is not correct - if not boot.valid_chksum: - self.log.msg(Log.INFO,"invalid boot block checksum",0) - self.log.msg(Log.INFO,"dos type is '%s'" % DosType.get_dos_type_str(self.dos_type)) - return (True, boot.valid_chksum) - else: - self.log.msg(Log.ERROR,"invalid boot block dos type",0) - return (False, False) - - def scan_root(self): - """Step 2: scan root block. 
+ # check boot block + boot = BootBlock(self.blkdev) + boot.read() + if boot.valid: + # dos type is valid + self.boot = boot + self.dos_type = boot.dos_type + # give a warning if checksum is not correct + if not boot.valid_chksum: + self.log.msg(Log.INFO, "invalid boot block checksum", 0) + self.log.msg( + Log.INFO, "dos type is '%s'" % DosType.get_dos_type_str(self.dos_type) + ) + return (True, boot.valid_chksum) + else: + self.log.msg(Log.ERROR, "invalid boot block dos type", 0) + return (False, False) + + def scan_root(self): + """Step 2: scan root block. Try to determine root block from boot block or guess number. Returns True if the root block could be decoded. """ - if self.boot != None: - # retrieve root block number from boot block - root_blk_num = self.boot.got_root_blk - # check root block number - if root_blk_num == 0: - new_root = self.blkdev.num_blocks // 2 - self.log.msg(Log.INFO,"Boot contains not Root blk. Using default: %d" % new_root,root_blk_num) - root_blk_num = new_root - elif root_blk_num < self.blkdev.reserved or root_blk_num > self.blkdev.num_blocks: - new_root = self.blkdev.num_blocks // 2 - self.log.msg(Log.INFO,"Invalid root block number: given %d using guess %d" % (root_blk_num, new_root),root_blk_num) - root_blk_num = new_root - else: - # guess root block number - root_blk_num = self.blkdev.num_blocks // 2 - self.log.msg(Log.INFO,"Guessed root block number",root_blk_num) - # read root block - root = RootBlock(self.blkdev, root_blk_num) - root.read() - if not root.valid: - self.log.msg(Log.INFO,"Root block is not valid -> No file system",root_blk_num) - self.root = None # mode without root - return False - else: - self.root = root - return True - - def scan_dir_tree(self): - """Step 3: scan directory structure + if self.boot != None: + # retrieve root block number from boot block + root_blk_num = self.boot.got_root_blk + # check root block number + if root_blk_num == 0: + new_root = self.blkdev.num_blocks // 2 + self.log.msg( + Log.INFO, 
+ "Boot contains not Root blk. Using default: %d" % new_root, + root_blk_num, + ) + root_blk_num = new_root + elif ( + root_blk_num < self.blkdev.reserved + or root_blk_num > self.blkdev.num_blocks + ): + new_root = self.blkdev.num_blocks // 2 + self.log.msg( + Log.INFO, + "Invalid root block number: given %d using guess %d" + % (root_blk_num, new_root), + root_blk_num, + ) + root_blk_num = new_root + else: + # guess root block number + root_blk_num = self.blkdev.num_blocks // 2 + self.log.msg(Log.INFO, "Guessed root block number", root_blk_num) + # read root block + root = RootBlock(self.blkdev, root_blk_num) + root.read() + if not root.valid: + self.log.msg( + Log.INFO, "Root block is not valid -> No file system", root_blk_num + ) + self.root = None # mode without root + return False + else: + self.root = root + return True + + def scan_dir_tree(self): + """Step 3: scan directory structure Return false if structure is not healthy""" - self.block_scan = BlockScan(self.blkdev, self.log, self.dos_type) - self.dir_scan = DirScan(self.block_scan, self.log) - ok = self.dir_scan.scan_tree(self.root.blk_num, progress=self.progress) - self.log.msg(Log.INFO,"Scanned %d directories" % len(self.dir_scan.get_all_dir_infos())) - if self.debug: - self.dir_scan.dump() - - def scan_files(self): - """Step 4: scan through all found files""" - self.file_scan = FileScan(self.block_scan, self.log, self.dos_type) - all_files = self.dir_scan.get_all_file_hdr_blk_infos() - self.log.msg(Log.INFO,"Scanning %d files" % len(all_files)) - self.file_scan.scan_all_files(all_files, progress=self.progress) - if self.debug: - self.file_scan.dump() - - def scan_bitmap(self): - """Step 5: validate block bitmap""" - self.bitmap_scan = BitmapScan(self.block_scan, self.log) - self.bitmap_scan.scan_bitmap(self.root) + self.block_scan = BlockScan(self.blkdev, self.log, self.dos_type) + self.dir_scan = DirScan(self.block_scan, self.log) + ok = self.dir_scan.scan_tree(self.root.blk_num, 
progress=self.progress) + self.log.msg( + Log.INFO, "Scanned %d directories" % len(self.dir_scan.get_all_dir_infos()) + ) + if self.debug: + self.dir_scan.dump() + + def scan_files(self): + """Step 4: scan through all found files""" + self.file_scan = FileScan(self.block_scan, self.log, self.dos_type) + all_files = self.dir_scan.get_all_file_hdr_blk_infos() + self.log.msg(Log.INFO, "Scanning %d files" % len(all_files)) + self.file_scan.scan_all_files(all_files, progress=self.progress) + if self.debug: + self.file_scan.dump() + + def scan_bitmap(self): + """Step 5: validate block bitmap""" + self.bitmap_scan = BitmapScan(self.block_scan, self.log) + self.bitmap_scan.scan_bitmap(self.root) - def get_summary(self): - """Return (errors, warnings) of log""" - num_errors = self.log.get_num_level(Log.ERROR) - num_warns = self.log.get_num_level(Log.WARN) - return (num_errors, num_warns) + def get_summary(self): + """Return (errors, warnings) of log""" + num_errors = self.log.get_num_level(Log.ERROR) + num_warns = self.log.get_num_level(Log.WARN) + return (num_errors, num_warns) diff --git a/amitools/rom/blizkick.py b/amitools/rom/blizkick.py index 07fbb1a9..5c82eff2 100644 --- a/amitools/rom/blizkick.py +++ b/amitools/rom/blizkick.py @@ -4,8 +4,8 @@ import amitools.binfmt.BinImage as BinImage BKMODULE_ID = 0x707A4E75 -BK_MODULE_ID = 0x4afc -BK_PATCH_ID = 0x4e71 +BK_MODULE_ID = 0x4AFC +BK_PATCH_ID = 0x4E71 class BlizKickModule: @@ -75,14 +75,15 @@ def fix_module(self): # check if we can remove last data segment (contains only version info) if len(segs) == 2 and segs[1].get_type() == BinImage.SEGMENT_TYPE_DATA: data = segs[1].get_data() - if data[:5] == '$VER:': + if data[:5] == "$VER:": self.bin_img.segments = [seg] # test -if __name__ == '__main__': +if __name__ == "__main__": import sys from amitools.binfmt.BinFmt import BinFmt + bfmt = BinFmt() for f in sys.argv[1:]: if bfmt.is_image(f): diff --git a/amitools/rom/kickrom.py b/amitools/rom/kickrom.py index 
4c5e7d92..c7409701 100644 --- a/amitools/rom/kickrom.py +++ b/amitools/rom/kickrom.py @@ -12,9 +12,9 @@ class KickRomAccess(RomAccess): EXT_HEADER_SIZE = 0x10 FOOTER_SIZE = 0x18 ROMHDR_SIZE = 8 - ROMHDR_256K = 0x11114ef9 - ROMHDR_512K = 0x11144ef9 - ROMHDR_EXT = 0x11144ef9 + ROMHDR_256K = 0x11114EF9 + ROMHDR_512K = 0x11144EF9 + ROMHDR_EXT = 0x11144EF9 def __init__(self, rom_data): RomAccess.__init__(self, rom_data) @@ -81,14 +81,14 @@ def check_rom_size_field(self): return self.read_rom_size_field() == self.size def check_magic_reset(self): - return self.read_word(0xd0) == 0x4e70 + return self.read_word(0xD0) == 0x4E70 def calc_check_sum(self, skip_off=None): """Check internal kickstart checksum and return True if is correct""" chk_sum = 0 num_longs = self.size // 4 off = 0 - max_u32 = 0xffffffff + max_u32 = 0xFFFFFFFF for i in range(num_longs): val = struct.unpack_from(">I", self.rom_data, off)[0] if off != skip_off: @@ -132,12 +132,12 @@ def write_header(self, jump_addr, kickety_split=False): offset = 0 hdr = self.ROMHDR_512K self.write_long(offset, hdr) - self.write_long(offset+4, jump_addr) + self.write_long(offset + 4, jump_addr) def write_ext_header(self, jump_addr, rom_rev): self.write_header(jump_addr) self.write_word(8, 0) - self.write_word(10, 0xffff) + self.write_word(10, 0xFFFF) self.write_word(12, rom_rev[0]) self.write_word(14, rom_rev[1]) @@ -156,8 +156,7 @@ def write_footer(self): def write_rom_ver_rev(self, rom_rev): """get (ver, rev) version info from ROM""" - return struct.pack_into(">HH", self.rom_data, 12, - rom_rev[0], rom_rev[1]) + return struct.pack_into(">HH", self.rom_data, 12, rom_rev[0], rom_rev[1]) def read_boot_pc(self): """return PC for booting the ROM""" @@ -177,11 +176,12 @@ def read_rom_size_field(self): return self.read_long(off) def get_base_addr(self): - return self.read_boot_pc() & ~0xffff + return self.read_boot_pc() & ~0xFFFF class Loader(object): """Load kick rom images in different formats""" + @classmethod def load(cls, 
kick_file, rom_key_file=None): raw_img = None @@ -191,7 +191,7 @@ def load(cls, kick_file, rom_key_file=None): raw_img = fh.read() # coded rom? need_key = False - if raw_img[:11] == b'AMIROMTYPE1': + if raw_img[:11] == b"AMIROMTYPE1": rom_img = raw_img[11:] need_key = True else: @@ -218,16 +218,17 @@ def _decode(cls, img, rom_key): # tiny test -if __name__ == '__main__': +if __name__ == "__main__": import sys + args = sys.argv n = len(args) if n > 1: ks_file = args[1] else: - ks_file = 'amiga-os-310-a500.rom' + ks_file = "amiga-os-310-a500.rom" print(ks_file) - ks = Loader.load(ks_file, 'rom.key') + ks = Loader.load(ks_file, "rom.key") kh = KickRomAccess(ks) print("is_kick_rom", kh.is_kick_rom()) print("detect_kick_rom", kh.detect_kick_rom()) diff --git a/amitools/rom/remusfile.py b/amitools/rom/remusfile.py index 2fed8d68..ca220eb2 100644 --- a/amitools/rom/remusfile.py +++ b/amitools/rom/remusfile.py @@ -3,8 +3,7 @@ class RemusRom(object): - def __init__(self, sum_off, chk_sum, size, base_addr, name, short_name, - flags): + def __init__(self, sum_off, chk_sum, size, base_addr, name, short_name, flags): self.sum_off = sum_off self.chk_sum = chk_sum self.size = size @@ -15,17 +14,35 @@ def __init__(self, sum_off, chk_sum, size, base_addr, name, short_name, self.modules = [] def __repr__(self): - return "RemusRom(sum_off=%08x,chk_sum=%08x,size=%08x,base_addr=%08x," \ - "name=%s,short_name=%s,flags=%x)" % \ - (self.sum_off, self.chk_sum, self.size, self.base_addr, - self.name, self.short_name, self.flags) + return ( + "RemusRom(sum_off=%08x,chk_sum=%08x,size=%08x,base_addr=%08x," + "name=%s,short_name=%s,flags=%x)" + % ( + self.sum_off, + self.chk_sum, + self.size, + self.base_addr, + self.name, + self.short_name, + self.flags, + ) + ) def dump(self): - print("(%04x) #%04x @%08x +%08x =%08x %08x: %08x %-24s %s" % - (self.flags, len(self.modules), self.base_addr, self.size, - self.base_addr + self.size, - self.sum_off, self.chk_sum, - self.short_name, self.name)) + 
print( + "(%04x) #%04x @%08x +%08x =%08x %08x: %08x %-24s %s" + % ( + self.flags, + len(self.modules), + self.base_addr, + self.size, + self.base_addr + self.size, + self.sum_off, + self.chk_sum, + self.short_name, + self.name, + ) + ) for m in self.modules: m.dump() @@ -40,22 +57,21 @@ def __init__(self, flags, relocs, patches, chk_sum, brelocs, fixes): self.fixes = fixes def __repr__(self): - return "RemusRomModuleExtra(relocs=%r,patches=%r,chk_sum=%08x," \ - "brelocs=%r,fixes=%r)" % \ - (self.relocs, self.patches, self.chk_sum, self.brelocs, - self.fixes) + return ( + "RemusRomModuleExtra(relocs=%r,patches=%r,chk_sum=%08x," + "brelocs=%r,fixes=%r)" + % (self.relocs, self.patches, self.chk_sum, self.brelocs, self.fixes) + ) def dump(self): if len(self.relocs) > 0: print(" relocs: ", ",".join(["%08x" % x for x in self.relocs])) if len(self.patches) > 0: - print(" patches:", ",".join( - ["%08x:%08x" % x for x in self.patches])) + print(" patches:", ",".join(["%08x:%08x" % x for x in self.patches])) if len(self.brelocs) > 0: print(" brelocs:", ",".join(["%08x" % x for x in self.brelocs])) if len(self.fixes) > 0: - print(" fixes: ", ",".join( - ["%08x:%08x" % x for x in self.fixes])) + print(" fixes: ", ",".join(["%08x:%08x" % x for x in self.fixes])) if self.chk_sum: print(" chk_sum: %08x" % self.chk_sum) @@ -69,17 +85,22 @@ def __init__(self, name, offset, size, extra_off): self.extra = None def __repr__(self): - return "RemusRomModule(name=%s,offset=%08x,size=%08x,extra_off=%08x)" \ - % (self.name, self.offset, self.size, self.extra_off) + return "RemusRomModule(name=%s,offset=%08x,size=%08x,extra_off=%08x)" % ( + self.name, + self.offset, + self.size, + self.extra_off, + ) def dump(self): if self.extra: flags = self.extra.flags else: flags = 0 - print(" @%08x +%08x =%08x %s (%02x)" % - (self.offset, self.size, self.offset + self.size, self.name, - flags)) + print( + " @%08x +%08x =%08x %s (%02x)" + % (self.offset, self.size, self.offset + self.size, self.name, 
flags) + ) if self.extra: self.extra.dump() @@ -98,8 +119,7 @@ def load(self, path): # check header len_hdr = self._read_long() if len_hdr != self.header: - raise IOError("Wrong header! %08x != %08x" % - (self.header, len_hdr)) + raise IOError("Wrong header! %08x != %08x" % (self.header, len_hdr)) # read version self.version = self._read_long() self.path = path @@ -126,14 +146,14 @@ def _read_string(self, pos): break res.append(self.data[pos]) pos += 1 - return bytes(res).decode('latin-1') + return bytes(res).decode("latin-1") class RemusSplitFile(RemusFile): - u32_max = 0xffffffff + u32_max = 0xFFFFFFFF def __init__(self): - RemusFile.__init__(self, 0x524d5346) + RemusFile.__init__(self, 0x524D5346) self.roms = [] def load(self, path): @@ -172,8 +192,7 @@ def _read_rom(self, extra_offs, string_offs): flags = 0 name = self._read_string(name_off) short_name = self._read_string(short_name_off) - rom = RemusRom(sum_off, chk_sum, size, base_addr, - name, short_name, flags) + rom = RemusRom(sum_off, chk_sum, size, base_addr, name, short_name, flags) # store string_offs.add(name_off) string_offs.add(short_name_off) @@ -209,7 +228,7 @@ def _read_extras(self, extra_offs, end_off): FLAG_SHORT_BCPL_RELOCS = 0x10 FLAG_CHK_SUM = 0x40 FLAG_FIXES = 0x80 - FLAG_MASK = 0xdf + FLAG_MASK = 0xDF # parse extras extra_map = {} for extra_off in extra_offs: @@ -269,8 +288,7 @@ def _read_extras(self, extra_offs, end_off): if flags & FLAG_CHK_SUM: chk_sum = self._read_long() # create extra - e = RemusRomModuleExtra( - flags, relocs, patches, chk_sum, brelocs, fixes) + e = RemusRomModuleExtra(flags, relocs, patches, chk_sum, brelocs, fixes) extra_map[extra_off] = e # check end of record # if self.offset not in extra_offs and self.offset != end_off: @@ -306,18 +324,22 @@ def __init__(self, count, bogus, chk_sum, name): self.name = name def __repr__(self): - return "RemusIdEntry(count=%x,bogus=%08x,chk_sum=%08x,name=%s" % \ - (self.count, self.bogus, self.chk_sum, self.name) + return 
"RemusIdEntry(count=%x,bogus=%08x,chk_sum=%08x,name=%s" % ( + self.count, + self.bogus, + self.chk_sum, + self.name, + ) class RemusIdFile(RemusFile): def __init__(self): - RemusFile.__init__(self, 0x524d4944) + RemusFile.__init__(self, 0x524D4944) self.entries = [] def load(self, path): RemusFile.load(self, path) - u16_max = 0xffff + u16_max = 0xFFFF # loop: new rom while True: # parse rom entry @@ -333,8 +355,7 @@ def load(self, path): def dump(self): for e in self.entries: - print("%04x %08x %08x %s" % - (e.count, e.bogus, e.chk_sum, e.name)) + print("%04x %08x %08x %s" % (e.count, e.bogus, e.chk_sum, e.name)) class RemusFileSet(object): @@ -377,7 +398,7 @@ def get_roms(self): return roms -if __name__ == '__main__': +if __name__ == "__main__": import sys from .kickrom import Loader, KickRomAccess diff --git a/amitools/rom/residentscan.py b/amitools/rom/residentscan.py index b73ea34c..fb2cc8ae 100644 --- a/amitools/rom/residentscan.py +++ b/amitools/rom/residentscan.py @@ -3,16 +3,16 @@ RTC_MATCHWORD = 0x4AFC -RTF_AUTOINIT = (1 << 7) -RTF_AFTERDOS = (1 << 2) -RTF_SINGLETASK = (1 << 1) -RTF_COLDSTART = (1 << 0) +RTF_AUTOINIT = 1 << 7 +RTF_AFTERDOS = 1 << 2 +RTF_SINGLETASK = 1 << 1 +RTF_COLDSTART = 1 << 0 flag_names = { RTF_AUTOINIT: "RTF_AUTOINIT", RTF_AFTERDOS: "RTF_AFTERDOS", RTF_SINGLETASK: "RTF_SINGLETASK", - RTF_COLDSTART: "RTF_COLDSTART" + RTF_COLDSTART: "RTF_COLDSTART", } NT_UNKNOWN = 0 @@ -26,13 +26,14 @@ NT_TASK: "NT_TASK", NT_DEVICE: "NT_DEVICE", NT_RESOURCE: "NT_RESOURCE", - NT_LIBRARY: "NT_LIBRARY" + NT_LIBRARY: "NT_LIBRARY", } class Resident: - def __init__(self, off, flags, version, node_type, pri, name, id_string, - init_off, skip_off): + def __init__( + self, off, flags, version, node_type, pri, name, id_string, init_off, skip_off + ): self.off = off self.flags = flags self.version = version @@ -44,10 +45,21 @@ def __init__(self, off, flags, version, node_type, pri, name, id_string, self.skip_off = skip_off def __repr__(self): - return 
"Resident(@off=%08x,flags=%02x,version=%d,node_type=%d," \ - "pri=%d,name=%s,id_string=%s,init_off=%08x,skip_off=%08x)" % \ - (self.off, self.flags, self.version, self.node_type, self.pri, - self.name, self.id_string, self.init_off, self.skip_off) + return ( + "Resident(@off=%08x,flags=%02x,version=%d,node_type=%d," + "pri=%d,name=%s,id_string=%s,init_off=%08x,skip_off=%08x)" + % ( + self.off, + self.flags, + self.version, + self.node_type, + self.pri, + self.name, + self.id_string, + self.init_off, + self.skip_off, + ) + ) def get_flags_strings(self): f = self.flags @@ -71,26 +83,27 @@ def parse(cls, access, off, base_addr): if mw != RTC_MATCHWORD: raise ValueError("No RTC_MATCHWORD at resident offset!") # +2 RT_MATCHTAG - tag_ptr = access.read_long(off+2) + tag_ptr = access.read_long(off + 2) if tag_ptr != base_addr + off: raise ValueError("Wrong MatchTag pointer in resident!") # +6 RT_ENDSKIP - end_skip_ptr = access.read_long(off+6) + end_skip_ptr = access.read_long(off + 6) end_skip_off = end_skip_ptr - base_addr # +10..13 RT_FLAGS, RT_VERSION, RT_TYPE, RT_PRI - flags = access.read_byte(off+10) - version = access.read_byte(off+11) - rtype = access.read_byte(off+12) - pri = access.read_sbyte(off+13) + flags = access.read_byte(off + 10) + version = access.read_byte(off + 11) + rtype = access.read_byte(off + 12) + pri = access.read_sbyte(off + 13) # +14: RT_NAME - name = cls._parse_cstr(access, off+14, base_addr) + name = cls._parse_cstr(access, off + 14, base_addr) # +18: RT_IDSTRING - id_string = cls._parse_cstr(access, off+18, base_addr) + id_string = cls._parse_cstr(access, off + 18, base_addr) # +22: RT_INIT - init_ptr = access.read_long(off+22) + init_ptr = access.read_long(off + 22) init_off = init_ptr - base_addr - return Resident(off, flags, version, rtype, pri, name, id_string, - init_off, end_skip_off) + return Resident( + off, flags, version, rtype, pri, name, id_string, init_off, end_skip_off + ) @classmethod def _parse_cstr(cls, access, off, 
base_addr): @@ -106,11 +119,10 @@ def _parse_cstr(cls, access, off, base_addr): break res.append(c) str_off += 1 - return bytes(res).decode('latin-1') + return bytes(res).decode("latin-1") class ResidentScan: - def __init__(self, rom_data, base_addr=0): self.access = RomAccess(rom_data) self.base_addr = base_addr @@ -136,10 +148,10 @@ def guess_base_addr(self): return None base_map = {} for off in offs: - tag_ptr = self.access.read_long(off+2) - tag_off = tag_ptr & 0xffff + tag_ptr = self.access.read_long(off + 2) + tag_off = tag_ptr & 0xFFFF if tag_off == off: - base_addr = tag_ptr & ~0xffff + base_addr = tag_ptr & ~0xFFFF if base_addr not in base_map: base_map[base_addr] = 1 else: @@ -158,7 +170,7 @@ def get_all_resident_pos(self): res = [] for off in offs: # check tag ptr - tag_ptr = self.access.read_long(off+2) + tag_ptr = self.access.read_long(off + 2) if tag_ptr == self.base_addr + off: res.append(off) return res @@ -167,7 +179,7 @@ def is_resident_at(self, off): mw = self.access.read_word(off) if mw != RTC_MATCHWORD: return False - tag_ptr = self.access.read_long(off+2) + tag_ptr = self.access.read_long(off + 2) return tag_ptr == self.base_addr + off def get_resident(self, off): diff --git a/amitools/rom/rombuilder.py b/amitools/rom/rombuilder.py index 93a831eb..794c4eb9 100644 --- a/amitools/rom/rombuilder.py +++ b/amitools/rom/rombuilder.py @@ -52,8 +52,8 @@ def get_size(self): return self.skip + 8 def get_data(self, addr): - data = chr(0xff) * self.skip - hdr = struct.pack(">II", 0x11114ef9, self.jmp_addr) + data = chr(0xFF) * self.skip + hdr = struct.pack(">II", 0x11114EF9, self.jmp_addr) return data + hdr @@ -70,7 +70,7 @@ def get_data(self, addr): class RomBuilder: - def __init__(self, size=512, base_addr=0xf80000, fill_byte=0xff): + def __init__(self, size=512, base_addr=0xF80000, fill_byte=0xFF): self.size = size # in KiB self.base_addr = base_addr self.fill_byte = fill_byte @@ -114,7 +114,7 @@ def build_file_list(self, names): files = [] for mod in 
names: # is an index file? - if mod.endswith('.txt'): + if mod.endswith(".txt"): base_path = os.path.dirname(mod) with open(mod, "r") as fh: for line in fh: @@ -145,7 +145,7 @@ def build_rom(self): off = self.rom_off for mod in self.modules: n = mod.get_size() - rom_data[off: off+n] = mod.get_data(addr) + rom_data[off : off + n] = mod.get_data(addr) off += n addr += n # fill empty space @@ -157,8 +157,7 @@ def build_rom(self): class KickRomBuilder(RomBuilder): - def __init__(self, size, kickety_split=True, rom_ver=None, - **kw_args): + def __init__(self, size, kickety_split=True, rom_ver=None, **kw_args): RomBuilder.__init__(self, size, **kw_args) self.rom_ver = rom_ver # do we need a rom header at 256k border? (the original ROMs do this) @@ -180,8 +179,7 @@ def __init__(self, size, kickety_split=True, rom_ver=None, def cross_kickety_split(self, num_bytes): if self.kickety_split: new_off = self.data_off + num_bytes - return self.data_off < self.split_offset and \ - new_off > self.split_offset + return self.data_off < self.split_offset and new_off > self.split_offset else: return False @@ -208,8 +206,9 @@ def build_rom(self): class ExtRomBuilder(RomBuilder): - def __init__(self, size, rom_ver=None, add_footer=False, - kick_addr=0xf80000, **kw_args): + def __init__( + self, size, rom_ver=None, add_footer=False, kick_addr=0xF80000, **kw_args + ): RomBuilder.__init__(self, size, **kw_args) # kick addr for jump self.kick_addr = kick_addr @@ -230,7 +229,7 @@ def build_rom(self): rom_data = RomBuilder.build_rom(self) # write a header kh = KickRomAccess(rom_data) - kh.write_ext_header(self.kick_addr+2, self.rom_ver) + kh.write_ext_header(self.kick_addr + 2, self.rom_ver) # write footer if self.add_footer: kh.write_ext_footer() diff --git a/amitools/rom/rompatcher.py b/amitools/rom/rompatcher.py index ca731b5c..d81de13e 100644 --- a/amitools/rom/rompatcher.py +++ b/amitools/rom/rompatcher.py @@ -21,30 +21,35 @@ def _ensure_arg(self, args, arg_name): class 
OneMegRomPatch(RomPatch): def __init__(self): RomPatch.__init__( - self, "1mb_rom", "Patch Kickstart to support ext ROM with 512 KiB") + self, "1mb_rom", "Patch Kickstart to support ext ROM with 512 KiB" + ) def apply_patch(self, access, args=None): off = 8 while off < 0x400: v = access.read_long(off) - if v == 0xf80000: - v4 = access.read_long(off+4) - v8 = access.read_long(off+8) - vc = access.read_long(off+0xc) - v10 = access.read_long(off+0x10) - if v4 == 0x1000000 and v8 == 0xf00000 and \ - vc == 0xf80000 and v10 == 0xffffffff: - vp8 = access.read_long(off-8) - if vp8 == 0xf80000: - access.write_long(off-4, 0x1000000) - access.write_long(off, 0xe00000) - access.write_long(off+4, 0xe80000) + if v == 0xF80000: + v4 = access.read_long(off + 4) + v8 = access.read_long(off + 8) + vc = access.read_long(off + 0xC) + v10 = access.read_long(off + 0x10) + if ( + v4 == 0x1000000 + and v8 == 0xF00000 + and vc == 0xF80000 + and v10 == 0xFFFFFFFF + ): + vp8 = access.read_long(off - 8) + if vp8 == 0xF80000: + access.write_long(off - 4, 0x1000000) + access.write_long(off, 0xE00000) + access.write_long(off + 4, 0xE80000) logging.info("@%08x Variant A", off) return True else: - access.write_long(off, 0xf00000) - access.write_long(off+8, 0xe00000) - access.write_long(off+0xc, 0xe80000) + access.write_long(off, 0xF00000) + access.write_long(off + 8, 0xE00000) + access.write_long(off + 0xC, 0xE80000) logging.info("@%08x Variant B", off) return True off += 2 @@ -54,9 +59,12 @@ def apply_patch(self, access, args=None): class BootConRomPatch(RomPatch): def __init__(self): - RomPatch.__init__(self, "boot_con", "Set the boot console", - {"name": "name of the new console," - " e.g. 'CON:MyConsole'"}) + RomPatch.__init__( + self, + "boot_con", + "Set the boot console", + {"name": "name of the new console," " e.g. 
'CON:MyConsole'"}, + ) def apply_patch(self, access, args): # search CON: @@ -66,7 +74,7 @@ def apply_patch(self, access, args): logging.error("console not found!") return False # find terminator - pos = data.find(b'\0', off) + pos = data.find(b"\0", off) if pos == -1: logging.error("no console end found!") return False @@ -75,17 +83,16 @@ def apply_patch(self, access, args): con_old = data[off:pos] logging.info("@%08x: +%08x old='%s'" % (off, con_old_len, con_old)) # check new string - if 'name' in args: - con_new = args['name'].encode('latin-1') + if "name" in args: + con_new = args["name"].encode("latin-1") con_new_len = len(con_new) if con_new_len > con_old_len: - logging.error("new console name is too long (>%d)!", - con_old_len) + logging.error("new console name is too long (>%d)!", con_old_len) return False # pad and write to rom pad_len = con_old_len - con_new_len + 1 - con_new += b'\0' * pad_len - data[off:pos+1] = con_new + con_new += b"\0" * pad_len + data[off : pos + 1] = con_new logging.info("new='%s'" % (con_new_len)) return True @@ -93,10 +100,7 @@ def apply_patch(self, access, args): class RomPatcher: # list of all available patch classes - patches = [ - OneMegRomPatch(), - BootConRomPatch() - ] + patches = [OneMegRomPatch(), BootConRomPatch()] def __init__(self, rom): self.access = RomAccess(rom) diff --git a/amitools/rom/romsplitter.py b/amitools/rom/romsplitter.py index dbf43b18..d2d11592 100644 --- a/amitools/rom/romsplitter.py +++ b/amitools/rom/romsplitter.py @@ -7,10 +7,10 @@ from amitools.binfmt.BinImage import ( BinImage, Segment, - Relocations, + Relocations, Reloc, BIN_IMAGE_TYPE_HUNK, - SEGMENT_TYPE_CODE + SEGMENT_TYPE_CODE, ) @@ -50,20 +50,28 @@ def find_rom(self, rom_path): def print_rom(self, out, show_entries=False): rom = self.remus_rom - out("rom @%06x +%06x sum=%08x@%08x %s" % - (rom.base_addr, rom.size, rom.chk_sum, rom.sum_off, rom.name)) + out( + "rom @%06x +%06x sum=%08x@%08x %s" + % (rom.base_addr, rom.size, rom.chk_sum, 
rom.sum_off, rom.name) + ) if show_entries: for module in rom.modules: self.print_entry(out, module) def print_entry(self, out, entry): - out(" @%06x +%06x =%06x (r:%5d,f:%2d,p:%2d) sum=%08x %s" % - (entry.offset, entry.size, entry.offset+entry.size, - len(entry.extra.relocs), - len(entry.extra.fixes), - len(entry.extra.patches), - entry.extra.chk_sum, - entry.name)) + out( + " @%06x +%06x =%06x (r:%5d,f:%2d,p:%2d) sum=%08x %s" + % ( + entry.offset, + entry.size, + entry.offset + entry.size, + len(entry.extra.relocs), + len(entry.extra.fixes), + len(entry.extra.patches), + entry.extra.chk_sum, + entry.name, + ) + ) def print_entries(self, out, entries): for e in entries: @@ -81,7 +89,7 @@ def query_entries(self, query_str): def extract_entry(self, entry, fixes=True, patches=False): """return data, relocs""" - data = self.rom_data[entry.offset:entry.offset+entry.size] + data = self.rom_data[entry.offset : entry.offset + entry.size] extra = entry.extra relocs = extra.relocs entry_addr = self.remus_rom.base_addr + entry.offset @@ -102,8 +110,9 @@ def _clean_relocs(self, data, relocs, base_addr): for off in relocs: addr = struct.unpack_from(">I", data, off)[0] if addr < base_addr: - raise ValueError("Invalid relocatable address: %08x base=%08x" - % (addr, base_addr)) + raise ValueError( + "Invalid relocatable address: %08x base=%08x" % (addr, base_addr) + ) addr -= base_addr struct.pack_into(">I", data, off, addr) return data diff --git a/amitools/scan/ADFSScanner.py b/amitools/scan/ADFSScanner.py index 426ccd35..56881f25 100644 --- a/amitools/scan/ADFSScanner.py +++ b/amitools/scan/ADFSScanner.py @@ -5,74 +5,84 @@ from amitools.fs.blkdev.BlkDevFactory import BlkDevFactory from amitools.fs.ADFSVolume import ADFSVolume + class ADFSScanner: + def __init__(self): + self.factory = BlkDevFactory() - def __init__(self): - self.factory = BlkDevFactory() + def can_handle(self, scan_file): + base_name = scan_file.get_basename().lower() + for ext in 
self.factory.valid_extensions: + if base_name.endswith(ext): + return True + return False - def can_handle(self, scan_file): - base_name = scan_file.get_basename().lower() - for ext in self.factory.valid_extensions: - if base_name.endswith(ext): - return True - return False + def handle(self, scan_file, scanner): + if scan_file.is_seekable(): + sf = scan_file + else: + sf = scanner.promote_scan_file(scan_file, seekable=True) + # create blkdev + blkdev = self.factory.open(sf.get_local_path(), fobj=sf.get_fobj()) + # create volume + volume = ADFSVolume(blkdev) + volume.open() + # scan volume + node = volume.get_root_dir() + ok = self._scan_node(sf, scanner, node) + # done + volume.close() + blkdev.close() + return ok - def handle(self, scan_file, scanner): - if scan_file.is_seekable(): - sf = scan_file - else: - sf = scanner.promote_scan_file(scan_file, seekable=True) - # create blkdev - blkdev = self.factory.open(sf.get_local_path(), fobj=sf.get_fobj()) - # create volume - volume = ADFSVolume(blkdev) - volume.open() - # scan volume - node = volume.get_root_dir() - ok = self._scan_node(sf, scanner, node) - # done - volume.close() - blkdev.close() - return ok + def _scan_node(self, scan_file, scanner, node): + if node.is_dir(): + # recurse into dir + entries = node.get_entries() + for e in entries: + ok = self._scan_node(scan_file, scanner, e) + if not ok: + return False + return True + elif node.is_file(): + # read file in ram fobj + data = node.get_file_data() + node.flush() + size = len(data) + path = node.get_node_path_name().get_unicode() + fobj = io.StringIO(data) + sf = scan_file.create_sub_path(path, fobj, size, True, False) + ok = scanner.scan_obj(sf) + sf.close() + return True - def _scan_node(self, scan_file, scanner, node): - if node.is_dir(): - # recurse into dir - entries = node.get_entries() - for e in entries: - ok = self._scan_node(scan_file, scanner, e) - if not ok: - return False - return True - elif node.is_file(): - # read file in ram fobj - data 
= node.get_file_data() - node.flush() - size = len(data) - path = node.get_node_path_name().get_unicode() - fobj = io.StringIO(data) - sf = scan_file.create_sub_path(path, fobj, size, True, False) - ok = scanner.scan_obj(sf) - sf.close() - return True # mini test -if __name__ == '__main__': - import sys - from .FileScanner import FileScanner +if __name__ == "__main__": + import sys + from .FileScanner import FileScanner + + ifs = ["*.txt"] + + def handler(scan_file): + print(scan_file) + return True + + def skip_handler(scan_file): + print(("SKIP:", scan_file)) + return True + + def error_handler(scan_file, error): + print(("FAILED:", scan_file, error)) + raise error - ifs = ['*.txt'] - def handler(scan_file): - print(scan_file) - return True - def skip_handler(scan_file): - print(("SKIP:", scan_file)) - return True - def error_handler(scan_file, error): - print(("FAILED:", scan_file, error)) - raise error - scanners = [ADFSScanner()] - fs = FileScanner(handler, ignore_filters=ifs, error_handler=error_handler, - scanners=scanners, skip_handler=skip_handler) - for a in sys.argv[1:]: - fs.scan(a) + scanners = [ADFSScanner()] + fs = FileScanner( + handler, + ignore_filters=ifs, + error_handler=error_handler, + scanners=scanners, + skip_handler=skip_handler, + ) + for a in sys.argv[1:]: + fs.scan(a) diff --git a/amitools/scan/ArchiveScanner.py b/amitools/scan/ArchiveScanner.py index c2fec424..3e816e34 100644 --- a/amitools/scan/ArchiveScanner.py +++ b/amitools/scan/ArchiveScanner.py @@ -1,116 +1,123 @@ - - import zipfile import io # optional lhafile try: - import lhafile + import lhafile except ImportError: - lhafile = None + lhafile = None class ArchiveScanner: - """Scan archives and visit all files""" - - exts = [] # valid file extensions - - def _create_archive_obj(self, fobj, scanner): - pass - - def _create_entry_scan_file(self, arc, info, sf): - pass - - def can_handle(self, scan_file): - base_name = scan_file.get_basename().lower() - for ext in self.exts: - if 
base_name.endswith(ext): + """Scan archives and visit all files""" + + exts = [] # valid file extensions + + def _create_archive_obj(self, fobj, scanner): + pass + + def _create_entry_scan_file(self, arc, info, sf): + pass + + def can_handle(self, scan_file): + base_name = scan_file.get_basename().lower() + for ext in self.exts: + if base_name.endswith(ext): + return True + return False + + def handle(self, scan_file, scanner): + """scan a given archive file""" + # ensure a seekable fobj + if not scan_file.is_seekable(): + sf = scanner.promote_scan_file(scan_file, seekable=True) + else: + sf = scan_file + # create archive obj + arc = self._create_archive_obj(sf, scanner) + if arc is None: + return True # simply ignore + # get infos + infos = arc.infolist() + for info in infos: + if info.file_size > 0: + sf = self._create_entry_scan_file(arc, info, scan_file) + ok = scanner.scan_obj(sf) + sf.close() + if not ok: + return False return True - return False - - def handle(self, scan_file, scanner): - """scan a given archive file""" - # ensure a seekable fobj - if not scan_file.is_seekable(): - sf = scanner.promote_scan_file(scan_file, seekable=True) - else: - sf = scan_file - # create archive obj - arc = self._create_archive_obj(sf, scanner) - if arc is None: - return True # simply ignore - # get infos - infos = arc.infolist() - for info in infos: - if info.file_size > 0: - sf = self._create_entry_scan_file(arc, info, scan_file) - ok = scanner.scan_obj(sf) - sf.close() - if not ok: - return False - return True class ZipScanner(ArchiveScanner): - """Scan .zip Archives""" + """Scan .zip Archives""" - exts = [".zip"] + exts = [".zip"] - def _create_archive_obj(self, sf, scanner): - try: - fobj = sf.get_fobj() - return zipfile.ZipFile(fobj, "r") - except Exception as e: - scanner.warning(sf, "error reading archive: %s" % e) + def _create_archive_obj(self, sf, scanner): + try: + fobj = sf.get_fobj() + return zipfile.ZipFile(fobj, "r") + except Exception as e: + 
scanner.warning(sf, "error reading archive: %s" % e) - def _create_entry_scan_file(self, arc, info, scan_file): - name = info.filename - fobj = arc.open(info) - size = info.file_size - # its a non-seekable file - return scan_file.create_sub_path(name, fobj, size, False, False) + def _create_entry_scan_file(self, arc, info, scan_file): + name = info.filename + fobj = arc.open(info) + size = info.file_size + # its a non-seekable file + return scan_file.create_sub_path(name, fobj, size, False, False) class LhaScanner(ArchiveScanner): - """Scan .lha/.lzh Archives""" + """Scan .lha/.lzh Archives""" - exts = [".lha", ".lzh"] + exts = [".lha", ".lzh"] - def _create_archive_obj(self, sf, scanner): - if lhafile: - try: - fobj = sf.get_fobj() - return lhafile.LhaFile(fobj, "r") - except Exception as e: - scanner.warning(sf, "error reading archive: %s" % e) - else: - scanner.warning(sf, "can't handle archive. missing 'lhafile' module.") + def _create_archive_obj(self, sf, scanner): + if lhafile: + try: + fobj = sf.get_fobj() + return lhafile.LhaFile(fobj, "r") + except Exception as e: + scanner.warning(sf, "error reading archive: %s" % e) + else: + scanner.warning(sf, "can't handle archive. 
missing 'lhafile' module.") - def _create_entry_scan_file(self, arc, info, scan_file): - data = arc.read(info.filename) - fobj = io.StringIO(data) - size = info.file_size - name = info.filename - return scan_file.create_sub_path(name, fobj, size, True, False) + def _create_entry_scan_file(self, arc, info, scan_file): + data = arc.read(info.filename) + fobj = io.StringIO(data) + size = info.file_size + name = info.filename + return scan_file.create_sub_path(name, fobj, size, True, False) # mini test -if __name__ == '__main__': - import sys - from .FileScanner import FileScanner - - ifs = ['*.txt'] - def handler(scan_file): - print(scan_file) - return True - def skip_handler(scan_file): - print("SKIP:", scan_file) - return True - def error_handler(scan_file, error): - print("FAILED:", scan_file, error) - raise error - scanners = [LhaScanner(), ZipScanner()] - fs = FileScanner(handler, ignore_filters=ifs, error_handler=error_handler, - scanners=scanners, skip_handler=skip_handler) - for a in sys.argv[1:]: - fs.scan(a) +if __name__ == "__main__": + import sys + from .FileScanner import FileScanner + + ifs = ["*.txt"] + + def handler(scan_file): + print(scan_file) + return True + + def skip_handler(scan_file): + print("SKIP:", scan_file) + return True + + def error_handler(scan_file, error): + print("FAILED:", scan_file, error) + raise error + + scanners = [LhaScanner(), ZipScanner()] + fs = FileScanner( + handler, + ignore_filters=ifs, + error_handler=error_handler, + scanners=scanners, + skip_handler=skip_handler, + ) + for a in sys.argv[1:]: + fs.scan(a) diff --git a/amitools/scan/FileScanner.py b/amitools/scan/FileScanner.py index 0cf2badd..98ead5b6 100644 --- a/amitools/scan/FileScanner.py +++ b/amitools/scan/FileScanner.py @@ -1,132 +1,142 @@ # scan a set of file - import os import fnmatch import tempfile from .ScanFile import ScanFile -class FileScanner: - def __init__(self, handler=None, ignore_filters=None, scanners=None, - error_handler=None, ram_bytes=10 * 
1024 * 1024, - skip_handler=None, warning_handler=None): - """the handler will be called with all the scanned files. +class FileScanner: + def __init__( + self, + handler=None, + ignore_filters=None, + scanners=None, + error_handler=None, + ram_bytes=10 * 1024 * 1024, + skip_handler=None, + warning_handler=None, + ): + """the handler will be called with all the scanned files. the optional ignore_filters contains a list of glob pattern to ignore file names""" - self.handler = handler - self.error_handler = error_handler - self.warning_handler = warning_handler - self.skip_handler = skip_handler - self.ignore_filters = ignore_filters - self.scanners = scanners - self.ram_bytes = ram_bytes - - def scan(self, path): - """start scanning a path. either a file or directory""" - if os.path.isdir(path): - return self._scan_dir(path) - elif os.path.isfile(path): - return self._scan_file(path) - else: - return True - - def scan_obj(self, scan_file, check_ignore=True): - """pass a ScanFile to check""" - if check_ignore and self._is_ignored(scan_file.get_local_path()): - return False - # does a scanner match? 
- sf = scan_file - sc = self.scanners - if sc is not None: - for s in sc: - if s.can_handle(sf): - ok = s.handle(sf, self) - sf.close() - return ok - # no match call user's handler - ok = self._call_handler(sf) - sf.close() - return ok - - def _scan_dir(self, path): - if self._is_ignored(path): - return True - for root, dirs, files in os.walk(path): - for name in files: - if not self._scan_file(os.path.join(root,name)): - return False - for name in dirs: - if not self._scan_dir(os.path.join(root,name)): - return False - return True - - def _scan_file(self, path): - if self._is_ignored(path): - return True - # build a scan file - try: - size = os.path.getsize(path) - with open(path, "rb") as fobj: - sf = ScanFile(path, fobj, size, True, True) - return self.scan_obj(sf, False) - except IOError as e: - eh = self.error_handler - if eh is not None: - sf = ScanFile(path, None, 0) - return eh(sf, e) - else: - # ignore error + self.handler = handler + self.error_handler = error_handler + self.warning_handler = warning_handler + self.skip_handler = skip_handler + self.ignore_filters = ignore_filters + self.scanners = scanners + self.ram_bytes = ram_bytes + + def scan(self, path): + """start scanning a path. either a file or directory""" + if os.path.isdir(path): + return self._scan_dir(path) + elif os.path.isfile(path): + return self._scan_file(path) + else: + return True + + def scan_obj(self, scan_file, check_ignore=True): + """pass a ScanFile to check""" + if check_ignore and self._is_ignored(scan_file.get_local_path()): + return False + # does a scanner match? 
+ sf = scan_file + sc = self.scanners + if sc is not None: + for s in sc: + if s.can_handle(sf): + ok = s.handle(sf, self) + sf.close() + return ok + # no match call user's handler + ok = self._call_handler(sf) + sf.close() + return ok + + def _scan_dir(self, path): + if self._is_ignored(path): + return True + for root, dirs, files in os.walk(path): + for name in files: + if not self._scan_file(os.path.join(root, name)): + return False + for name in dirs: + if not self._scan_dir(os.path.join(root, name)): + return False return True - def _is_ignored(self, path): - if self.ignore_filters is not None: - base = os.path.basename(path) - for f in self.ignore_filters: - if fnmatch.fnmatch(base, f): - return True - return False - - def _call_handler(self, scan_file): - if self.handler is not None: - return self.handler(scan_file) - else: - return True - - def _call_skip_handler(self, scan_file): - if self.skip_handler is not None: - return self.skip_handler(scan_file) - else: - return True - - def promote_scan_file(self, scan_file, seekable=False, file_based=False): - if not seekable and not file_base: - return scan_file - fb = file_based - if not fb and seekable and scan_file.size > self.ram_bytes: - fb = True - sf = scan_file.create_clone(seekable, fb) - scan_file.close() - return sf - - def warn(self, scan_file, msg): - wh = self.warning_handler - if wh is not None: - wh(scan_file, msg) + def _scan_file(self, path): + if self._is_ignored(path): + return True + # build a scan file + try: + size = os.path.getsize(path) + with open(path, "rb") as fobj: + sf = ScanFile(path, fobj, size, True, True) + return self.scan_obj(sf, False) + except IOError as e: + eh = self.error_handler + if eh is not None: + sf = ScanFile(path, None, 0) + return eh(sf, e) + else: + # ignore error + return True + + def _is_ignored(self, path): + if self.ignore_filters is not None: + base = os.path.basename(path) + for f in self.ignore_filters: + if fnmatch.fnmatch(base, f): + return True + return 
False + + def _call_handler(self, scan_file): + if self.handler is not None: + return self.handler(scan_file) + else: + return True + + def _call_skip_handler(self, scan_file): + if self.skip_handler is not None: + return self.skip_handler(scan_file) + else: + return True + + def promote_scan_file(self, scan_file, seekable=False, file_based=False): + if not seekable and not file_base: + return scan_file + fb = file_based + if not fb and seekable and scan_file.size > self.ram_bytes: + fb = True + sf = scan_file.create_clone(seekable, fb) + scan_file.close() + return sf + + def warn(self, scan_file, msg): + wh = self.warning_handler + if wh is not None: + wh(scan_file, msg) # mini test -if __name__ == '__main__': - import sys - ifs = ['*.txt'] - def handler(scan_file): - print(scan_file) - return True - def error_handler(scan_file, error): - print("FAILED:", scan_file, error) - raise error - fs = FileScanner(handler, ignore_filters=ifs, error_handler=error_handler) - for a in sys.argv[1:]: - fs.scan(a) +if __name__ == "__main__": + import sys + + ifs = ["*.txt"] + + def handler(scan_file): + print(scan_file) + return True + + def error_handler(scan_file, error): + print("FAILED:", scan_file, error) + raise error + + fs = FileScanner(handler, ignore_filters=ifs, error_handler=error_handler) + for a in sys.argv[1:]: + fs.scan(a) diff --git a/amitools/scan/ScanFile.py b/amitools/scan/ScanFile.py index fa5858d4..f4285714 100644 --- a/amitools/scan/ScanFile.py +++ b/amitools/scan/ScanFile.py @@ -1,76 +1,79 @@ - - import os import io -class ScanFile: - """a file that is currently scanned""" - - def __init__(self, path, fobj, size, seekable=True, file_based=True): - """create a scan file from a host file object""" - if type(path) is list: - self.paths = path - else: - self.paths = [path] - self.fobj = fobj - self.size = size - self.seekable = seekable - self.file_based = file_based - - def __str__(self): - return "[%s:%d, seekable=%s, file_based=%s, fobj=%s]" % \ - 
(self.get_path(), self.size, self.seekable, self.file_based, - self.fobj.__class__.__name__) - - def __repr__(self): - return self.__str__() - - def is_seekable(self): - return self.seekable - - def is_file_based(self): - return self.file_based - def get_path(self): - return ";".join(self.paths) - - def get_local_path(self): - return self.paths[-1] - - def get_basename(self): - return os.path.basename(self.paths[-1]) - - def get_fobj(self): - return self.fobj - - def is_host_path(self): - return len(self.paths) == 1 - - def close(self): - self.fobj.close() - - def create_sub_path(self, sub_path, fobj, size, seekable, file_based): - paths = self.paths[:] - paths.append(sub_path) - return ScanFile(paths, fobj, size, seekable, file_based) - - def create_clone(self, seekable, file_based): - src_fobj = self.fobj - # create a temp file - if file_based: - fobj = tempfile.TemporaryFile() - # copy original file - blk_size = 4096 - while True: - buf = src_fobj.read(blk_size) - if len(buf) == 0: - break - fobj.write(buf) - # create a string buffer - else: - data = src_fobj.read() - fobj = io.StringIO(data) - # close old scan file - src_fobj.close() - # create promoted file - return ScanFile(self.paths, fobj, self.size, seekable, file_based) +class ScanFile: + """a file that is currently scanned""" + + def __init__(self, path, fobj, size, seekable=True, file_based=True): + """create a scan file from a host file object""" + if type(path) is list: + self.paths = path + else: + self.paths = [path] + self.fobj = fobj + self.size = size + self.seekable = seekable + self.file_based = file_based + + def __str__(self): + return "[%s:%d, seekable=%s, file_based=%s, fobj=%s]" % ( + self.get_path(), + self.size, + self.seekable, + self.file_based, + self.fobj.__class__.__name__, + ) + + def __repr__(self): + return self.__str__() + + def is_seekable(self): + return self.seekable + + def is_file_based(self): + return self.file_based + + def get_path(self): + return ";".join(self.paths) + 
+ def get_local_path(self): + return self.paths[-1] + + def get_basename(self): + return os.path.basename(self.paths[-1]) + + def get_fobj(self): + return self.fobj + + def is_host_path(self): + return len(self.paths) == 1 + + def close(self): + self.fobj.close() + + def create_sub_path(self, sub_path, fobj, size, seekable, file_based): + paths = self.paths[:] + paths.append(sub_path) + return ScanFile(paths, fobj, size, seekable, file_based) + + def create_clone(self, seekable, file_based): + src_fobj = self.fobj + # create a temp file + if file_based: + fobj = tempfile.TemporaryFile() + # copy original file + blk_size = 4096 + while True: + buf = src_fobj.read(blk_size) + if len(buf) == 0: + break + fobj.write(buf) + # create a string buffer + else: + data = src_fobj.read() + fobj = io.StringIO(data) + # close old scan file + src_fobj.close() + # create promoted file + return ScanFile(self.paths, fobj, self.size, seekable, file_based) diff --git a/amitools/tools/fdtool.py b/amitools/tools/fdtool.py index 027056f4..fdfa746c 100755 --- a/amitools/tools/fdtool.py +++ b/amitools/tools/fdtool.py @@ -10,78 +10,114 @@ # ----- dump ----- + def dump(fname, fd, add_private): - print(fname) - print((" base: %s" % fd.get_base_name())) - funcs = fd.get_funcs() - num = 1 - for f in funcs: - if add_private or not f.is_private(): - bias = f.get_bias() - print((" #%04d %5d 0x%04x %30s %s" % (num,bias,bias,f.get_name(),f.get_arg_str()))) - num += 1 + print(fname) + print((" base: %s" % fd.get_base_name())) + funcs = fd.get_funcs() + num = 1 + for f in funcs: + if add_private or not f.is_private(): + bias = f.get_bias() + print( + ( + " #%04d %5d 0x%04x %30s %s" + % (num, bias, bias, f.get_name(), f.get_arg_str()) + ) + ) + num += 1 + # ----- generate ----- + def generate_python_code(fd, add_private): - funcs = fd.get_funcs() - for f in funcs: - if add_private or not f.is_private(): - args = f.get_args() - if len(args)>0: - args = tuple(args) - else: - args = None - print(" (%d, 
'%s', %s)," % (f.get_bias(),f.get_name(),args)) + funcs = fd.get_funcs() + for f in funcs: + if add_private or not f.is_private(): + args = f.get_args() + if len(args) > 0: + args = tuple(args) + else: + args = None + print(" (%d, '%s', %s)," % (f.get_bias(), f.get_name(), args)) + def generate_sasc_code(fname, fd, add_private, prefix=""): - funcs = fd.get_funcs() - fo = open(fname, "w") - for f in funcs: - if add_private or not f.is_private(): - line = "__asm __saveds int %s%s(" % (prefix, f.get_name()) - args = f.get_args() - if args != None: - for a in args: - line += "register __%s int %s" % (a[1],a[0]) - if a != args[-1]: - line += ", " - else: - line += " void " - line += " )" - fo.write(line) - fo.write("{\n return 0;\n}\n\n") - fo.close() + funcs = fd.get_funcs() + fo = open(fname, "w") + for f in funcs: + if add_private or not f.is_private(): + line = "__asm __saveds int %s%s(" % (prefix, f.get_name()) + args = f.get_args() + if args != None: + for a in args: + line += "register __%s int %s" % (a[1], a[0]) + if a != args[-1]: + line += ", " + else: + line += " void " + line += " )" + fo.write(line) + fo.write("{\n return 0;\n}\n\n") + fo.close() + # ----- main ----- def main(): - # parse args - parser = argparse.ArgumentParser() - parser.add_argument('files', nargs='+') - parser.add_argument('-P', '--add-private', action='store_true', default=False, help="add private functions") - parser.add_argument('-p', '--gen-python', action='store_true', default=False, help="generate python code for vamos") - parser.add_argument('-f', '--gen-fd', action='store', default=None, help="generate a new fd file") - parser.add_argument('-c', '--gen-sasc', action='store', default=None, help="generate SAS C code file") - parser.add_argument('-E', '--prefix', action='store', default='', help="add prefix to functions in C") - args = parser.parse_args() - - # main loop - files = args.files - for fname in files: - fd = FDFormat.read_fd(fname) - code_gen = False - if 
args.gen_python: - generate_python_code(fd, args.add_private) - code_gen = True - if args.gen_sasc: - generate_sasc_code(args.gen_sasc, fd, args.add_private, args.prefix) - code_gen = True - if args.gen_fd != None: - FDFormat.write_fd(args.gen_fd, fd, args.add_private) - code_gen = True - if not code_gen: - dump(fname, fd, args.add_private) - - -if __name__ == '__main__': - main() + # parse args + parser = argparse.ArgumentParser() + parser.add_argument("files", nargs="+") + parser.add_argument( + "-P", + "--add-private", + action="store_true", + default=False, + help="add private functions", + ) + parser.add_argument( + "-p", + "--gen-python", + action="store_true", + default=False, + help="generate python code for vamos", + ) + parser.add_argument( + "-f", "--gen-fd", action="store", default=None, help="generate a new fd file" + ) + parser.add_argument( + "-c", + "--gen-sasc", + action="store", + default=None, + help="generate SAS C code file", + ) + parser.add_argument( + "-E", + "--prefix", + action="store", + default="", + help="add prefix to functions in C", + ) + args = parser.parse_args() + + # main loop + files = args.files + for fname in files: + fd = FDFormat.read_fd(fname) + code_gen = False + if args.gen_python: + generate_python_code(fd, args.add_private) + code_gen = True + if args.gen_sasc: + generate_sasc_code(args.gen_sasc, fd, args.add_private, args.prefix) + code_gen = True + if args.gen_fd != None: + FDFormat.write_fd(args.gen_fd, fd, args.add_private) + code_gen = True + if not code_gen: + dump(fname, fd, args.add_private) + + +if __name__ == "__main__": + main() diff --git a/amitools/tools/geotool.py b/amitools/tools/geotool.py index a0b74e72..d8557657 100644 --- a/amitools/tools/geotool.py +++ b/amitools/tools/geotool.py @@ -1,8 +1,6 @@ #!/usr/bin/env python3 - - import sys import os.path import amitools.util.KeyValue as KeyValue @@ -10,80 +8,86 @@ from amitools.fs.blkdev.DiskGeometry import DiskGeometry from amitools.fs.blkdev.BlkDevFactory 
import BlkDevFactory + def main(): - a = sys.argv - n = len(a) - if n < 3: - print("Usage: (detect [options] | setup | open [options] | create )") - print("""Options: + a = sys.argv + n = len(a) + if n < 3: + print( + "Usage: (detect [options] | setup | open [options] | create )" + ) + print( + """Options: size= chs=,, c= h= s= algo=1|2 - """) - return 1 - else: - cmd = a[1] - # detect disk geometry from given image file - if cmd == 'detect': - if os.path.exists(a[2]): - # its a file - size = os.path.getsize(a[2]) - else: - # parse size string - size = ByteSize.parse_byte_size_str(a[2]) - if size == None: - print("Invalid size!") - else: - d = DiskGeometry() - opts = None - if n > 3: - opts = KeyValue.parse_key_value_strings(a[3:]) - print("size: ",size) - print("opts: ",opts) - size = d.detect(size, opts) - if size != None: - print("geo: ", d) - else: - print("FAILED") - # setup a new disk geometry from options - elif cmd == 'setup' : - d = DiskGeometry() - opts = KeyValue.parse_key_value_strings(a[2:]) - print("opts: ", opts) - size = d.setup(opts) - if size != None: - print("setup: ", size, ByteSize.to_byte_size_str(size)) - print("geo: ", d) - else: - print("FAILED") - # open a blkdev and detect geometry - elif cmd == 'open': - opts = None - if n > 3: - opts = KeyValue.parse_key_value_strings(a[3:]) - print("opts: ", opts) - f = BlkDevFactory() - blkdev = f.open(a[2], options=opts) - if blkdev != None: - print("blkdev: ", blkdev.__class__.__name__) - print("geo: ", blkdev.get_geometry()) - blkdev.close() - else: - print("FAILED") - # create a new blkdev with setup geometry - elif cmd == 'create': - opts = KeyValue.parse_key_value_strings(a[3:]) - print("opts: ", opts) - f = BlkDevFactory() - blkdev = f.create(a[2], options=opts) - if blkdev != None: - print("blkdev: ",blkdev.__class__.__name__) - print("geo: ",blkdev.get_geometry()) - blkdev.close() - else: - print("FAILED") - return 0 + """ + ) + return 1 + else: + cmd = a[1] + # detect disk geometry from 
given image file + if cmd == "detect": + if os.path.exists(a[2]): + # its a file + size = os.path.getsize(a[2]) + else: + # parse size string + size = ByteSize.parse_byte_size_str(a[2]) + if size == None: + print("Invalid size!") + else: + d = DiskGeometry() + opts = None + if n > 3: + opts = KeyValue.parse_key_value_strings(a[3:]) + print("size: ", size) + print("opts: ", opts) + size = d.detect(size, opts) + if size != None: + print("geo: ", d) + else: + print("FAILED") + # setup a new disk geometry from options + elif cmd == "setup": + d = DiskGeometry() + opts = KeyValue.parse_key_value_strings(a[2:]) + print("opts: ", opts) + size = d.setup(opts) + if size != None: + print("setup: ", size, ByteSize.to_byte_size_str(size)) + print("geo: ", d) + else: + print("FAILED") + # open a blkdev and detect geometry + elif cmd == "open": + opts = None + if n > 3: + opts = KeyValue.parse_key_value_strings(a[3:]) + print("opts: ", opts) + f = BlkDevFactory() + blkdev = f.open(a[2], options=opts) + if blkdev != None: + print("blkdev: ", blkdev.__class__.__name__) + print("geo: ", blkdev.get_geometry()) + blkdev.close() + else: + print("FAILED") + # create a new blkdev with setup geometry + elif cmd == "create": + opts = KeyValue.parse_key_value_strings(a[3:]) + print("opts: ", opts) + f = BlkDevFactory() + blkdev = f.create(a[2], options=opts) + if blkdev != None: + print("blkdev: ", blkdev.__class__.__name__) + print("geo: ", blkdev.get_geometry()) + blkdev.close() + else: + print("FAILED") + return 0 + -if __name__ == '__main__': - sys.exit(main()) +if __name__ == "__main__": + sys.exit(main()) diff --git a/amitools/tools/hunktool.py b/amitools/tools/hunktool.py index 2b84a993..82bfd4be 100755 --- a/amitools/tools/hunktool.py +++ b/amitools/tools/hunktool.py @@ -21,221 +21,300 @@ import amitools.binfmt.elf from amitools.util.HexDump import * + def print_pretty(data): - pp = pprint.PrettyPrinter(indent=2) - pp.pprint(data) + pp = pprint.PrettyPrinter(indent=2) + 
pp.pprint(data) + # ----- commands ------------------------------------------------------------- + class HunkCommand: - def __init__(self, args): - self.counts = {} - self.args = args - self.failed_files = [] - - def handle_file(self, path, hunk_file, error_code, delta): - if error_code not in self.counts: - self.counts[error_code] = 0 - self.counts[error_code] += 1 - - print("%s (%.4fs)" % (path, delta), end=' ') - - # abort if hunk parser failed! - if error_code != Hunk.RESULT_OK: - print(Hunk.result_names[error_code], hunk_file.error_string) - if self.args.dump: - print_pretty(hunk_file.hunks) - self.failed_files.append( (path, "READ: " + hunk_file.error_string) ) - return not self.args.stop - - # if verbose then print block structure - if self.args.verbose: - print() - print(" hunks: ",hunk_file.get_hunk_summary()) - if self.args.dump: - print_pretty(hunk_file.hunks) - print(" type: ", end=' ') - - # build segments from hunks - ok = hunk_file.build_segments() - if not ok: - print("BUILD SEGMENTS FAILED: %s" % (hunk_file.error_string)) - self.failed_files.append( (path, "BUILD: " + hunk_file.error_string) ) - return not self.args.stop - - # print recognized file type name - print(Hunk.type_names[hunk_file.type], end=' ') - - # if verbose then print hunk structure - if self.args.verbose: - print() - print(" segments: ",hunk_file.get_segment_summary()) - print(" overlays: ",hunk_file.get_overlay_segment_summary()) - print(" libs: ",hunk_file.get_libs_summary()) - print(" units: ",hunk_file.get_units_summary()) - if self.args.dump: - print_pretty(hunk_file.hunks) - else: - print() - - # do special processing on hunk file for command - ok = self.handle_hunk_file(path, hunk_file) - return ok - - def result(self): - for code in list(self.counts.keys()): - print(Hunk.result_names[code],":",self.counts[code]) - for failed in self.failed_files: - print(failed[0],failed[1]) - return 0 - - def process_file(self, scan_file): - path = scan_file.get_path() - fobj = 
scan_file.get_fobj() - hunk_file = HunkReader.HunkReader() - start = time.perf_counter() - result = hunk_file.read_file_obj(path,fobj) - end = time.perf_counter() - delta = end - start - # ignore non hunk files - if result == Hunk.RESULT_NO_HUNK_FILE: - return True - return self.handle_file(path, hunk_file, result, delta) - - def run(self): - # setup error handler - def error_handler(sf, e): - print("FAILED", sf.get_path(), e) - return not self.args.stop - def warning_handler(sf, msg): - print("WARNING", sf.get_path(), msg) - # setup scanners - scanners = [ADFSScanner(), ZipScanner(), LhaScanner()] - scanner = FileScanner(self.process_file, - error_handler=error_handler, - warning_handler=warning_handler, - scanners=scanners) - for path in self.args.files: - ok = scanner.scan(path) - if not ok: - print("ABORTED") - return False - return True + def __init__(self, args): + self.counts = {} + self.args = args + self.failed_files = [] + + def handle_file(self, path, hunk_file, error_code, delta): + if error_code not in self.counts: + self.counts[error_code] = 0 + self.counts[error_code] += 1 + + print("%s (%.4fs)" % (path, delta), end=" ") + + # abort if hunk parser failed! 
+ if error_code != Hunk.RESULT_OK: + print(Hunk.result_names[error_code], hunk_file.error_string) + if self.args.dump: + print_pretty(hunk_file.hunks) + self.failed_files.append((path, "READ: " + hunk_file.error_string)) + return not self.args.stop + + # if verbose then print block structure + if self.args.verbose: + print() + print(" hunks: ", hunk_file.get_hunk_summary()) + if self.args.dump: + print_pretty(hunk_file.hunks) + print(" type: ", end=" ") + + # build segments from hunks + ok = hunk_file.build_segments() + if not ok: + print("BUILD SEGMENTS FAILED: %s" % (hunk_file.error_string)) + self.failed_files.append((path, "BUILD: " + hunk_file.error_string)) + return not self.args.stop + + # print recognized file type name + print(Hunk.type_names[hunk_file.type], end=" ") + + # if verbose then print hunk structure + if self.args.verbose: + print() + print(" segments: ", hunk_file.get_segment_summary()) + print(" overlays: ", hunk_file.get_overlay_segment_summary()) + print(" libs: ", hunk_file.get_libs_summary()) + print(" units: ", hunk_file.get_units_summary()) + if self.args.dump: + print_pretty(hunk_file.hunks) + else: + print() + + # do special processing on hunk file for command + ok = self.handle_hunk_file(path, hunk_file) + return ok + + def result(self): + for code in list(self.counts.keys()): + print(Hunk.result_names[code], ":", self.counts[code]) + for failed in self.failed_files: + print(failed[0], failed[1]) + return 0 + + def process_file(self, scan_file): + path = scan_file.get_path() + fobj = scan_file.get_fobj() + hunk_file = HunkReader.HunkReader() + start = time.perf_counter() + result = hunk_file.read_file_obj(path, fobj) + end = time.perf_counter() + delta = end - start + # ignore non hunk files + if result == Hunk.RESULT_NO_HUNK_FILE: + return True + return self.handle_file(path, hunk_file, result, delta) + + def run(self): + # setup error handler + def error_handler(sf, e): + print("FAILED", sf.get_path(), e) + return not self.args.stop 
+ + def warning_handler(sf, msg): + print("WARNING", sf.get_path(), msg) + + # setup scanners + scanners = [ADFSScanner(), ZipScanner(), LhaScanner()] + scanner = FileScanner( + self.process_file, + error_handler=error_handler, + warning_handler=warning_handler, + scanners=scanners, + ) + for path in self.args.files: + ok = scanner.scan(path) + if not ok: + print("ABORTED") + return False + return True + # ----- Validator ----- + class Validator(HunkCommand): + def handle_hunk_file(self, path, hunk_file): + # do nothing extra + return True - def handle_hunk_file(self, path, hunk_file): - # do nothing extra - return True # ----- Info ----- + class Info(HunkCommand): + def handle_hunk_file(self, path, hunk_file): + args = self.args + # verbose all hunk + hs = HunkShow.HunkShow( + hunk_file, + show_relocs=args.show_relocs, + show_debug=args.show_debug, + disassemble=args.disassemble, + disassemble_start=args.disassemble_start, + cpu=args.cpu, + hexdump=args.hexdump, + brief=args.brief, + ) + hs.show_segments() + return True - def handle_hunk_file(self, path, hunk_file): - args = self.args - # verbose all hunk - hs = HunkShow.HunkShow(hunk_file, \ - show_relocs=args.show_relocs, show_debug=args.show_debug, \ - disassemble=args.disassemble, disassemble_start=args.disassemble_start, \ - cpu=args.cpu, \ - hexdump=args.hexdump, \ - brief=args.brief) - hs.show_segments() - return True # ----- Relocate ----- + class Relocate(HunkCommand): + def handle_hunk_file(self, path, hunk_file): + if hunk_file.type != Hunk.TYPE_LOADSEG: + print("ERROR: can only relocate LoadSeg()able files:", path) + return False + + rel = HunkRelocate.HunkRelocate(hunk_file, verbose=self.args.verbose) + # get sizes of all segments + sizes = rel.get_sizes() + # calc begin addrs for all segments + base_addr = self.args.base_address + addrs = rel.get_seq_addrs(base_addr) + # relocate and return data of segments + datas = rel.relocate(addrs) + if datas == None: + print("ERROR: relocation failed:", path) + 
return False + else: + print("Relocate to base address", base_addr) + print("Bases: ", " ".join(["%06x" % (x) for x in addrs])) + print("Sizes: ", " ".join(["%06x" % (x) for x in sizes])) + print("Data: ", " ".join(["%06x" % (len(x)) for x in datas])) + print("Total: ", "%06x" % (rel.get_total_size())) + if args.hexdump: + for d in datas: + print_hex(d) + return True - def handle_hunk_file(self, path, hunk_file): - if hunk_file.type != Hunk.TYPE_LOADSEG: - print("ERROR: can only relocate LoadSeg()able files:",path); - return False - - rel = HunkRelocate.HunkRelocate(hunk_file,verbose=self.args.verbose) - # get sizes of all segments - sizes = rel.get_sizes() - # calc begin addrs for all segments - base_addr = self.args.base_address - addrs = rel.get_seq_addrs(base_addr) - # relocate and return data of segments - datas = rel.relocate(addrs) - if datas == None: - print("ERROR: relocation failed:",path) - return False - else: - print("Relocate to base address",base_addr) - print("Bases: "," ".join(["%06x"%(x) for x in addrs])) - print("Sizes: "," ".join(["%06x"%(x) for x in sizes])) - print("Data: "," ".join(["%06x"%(len(x)) for x in datas])) - print("Total: ","%06x"%(rel.get_total_size())) - if args.hexdump: - for d in datas: - print_hex(d) - return True # ----- Elf2Hunk ----- + class ElfInfo: - def __init__(self,args): - self.args = args - - def run(self): - for f in args.files: - reader = amitools.binfmt.elf.ELFReader() - elf = reader.load(open(f, "rb")) - if elf is None: - print("ERROR loading ELF:",elf.error_string) - return 1 - dumper = amitools.binfmt.elf.ELFDumper(elf) - dumper.dump_sections(show_relocs=args.show_relocs, show_debug=args.show_debug) - dumper.dump_symbols() - dumper.dump_relas() - return 0 + def __init__(self, args): + self.args = args + + def run(self): + for f in args.files: + reader = amitools.binfmt.elf.ELFReader() + elf = reader.load(open(f, "rb")) + if elf is None: + print("ERROR loading ELF:", elf.error_string) + return 1 + dumper = 
amitools.binfmt.elf.ELFDumper(elf) + dumper.dump_sections( + show_relocs=args.show_relocs, show_debug=args.show_debug + ) + dumper.dump_symbols() + dumper.dump_relas() + return 0 + # ----- main ----- def main(): - # call scanner and process all files with selected command - cmd_map = { - "validate" : Validator, - "info" : Info, - "elfinfo" : ElfInfo, - "relocate" : Relocate - } - - parser = argparse.ArgumentParser() - parser.add_argument('command', help="command: "+",".join(list(cmd_map.keys()))) - parser.add_argument('files', nargs='+') - parser.add_argument('-d', '--dump', action='store_true', default=False, help="dump the hunk structure") - parser.add_argument('-v', '--verbose', action='store_true', default=False, help="be more verbos") - parser.add_argument('-s', '--stop', action='store_true', default=False, help="stop on error") - parser.add_argument('-R', '--show-relocs', action='store_true', default=False, help="show relocation entries") - parser.add_argument('-D', '--show-debug', action='store_true', default=False, help="show debug info entries") - parser.add_argument('-A', '--disassemble', action='store_true', default=False, help="disassemble code segments") - parser.add_argument('-S', '--disassemble-start', action='store', type=int, default=0, help="start address for dissassembly") - parser.add_argument('-x', '--hexdump', action='store_true', default=False, help="dump segments in hex") - parser.add_argument('-b', '--brief', action='store_true', default=False, help="show only brief information") - parser.add_argument('-B', '--base-address', action='store', type=int, default=0, help="base address for relocation") - parser.add_argument('-c', '--cpu', action='store', default='68000', help="disassemble for given cpu (objdump only)") - args = parser.parse_args() - - cmd = args.command - if cmd not in cmd_map: - print("INVALID COMMAND:",cmd) - print("valid commands are:") - for a in cmd_map: - print(" ",a) - return 1 - cmd_cls = cmd_map[cmd] - - # execute 
command - cmd = cmd_cls(args) - res = cmd.run() - return res - - -if __name__ == '__main__': - sys.exit(main()) + # call scanner and process all files with selected command + cmd_map = { + "validate": Validator, + "info": Info, + "elfinfo": ElfInfo, + "relocate": Relocate, + } + + parser = argparse.ArgumentParser() + parser.add_argument("command", help="command: " + ",".join(list(cmd_map.keys()))) + parser.add_argument("files", nargs="+") + parser.add_argument( + "-d", + "--dump", + action="store_true", + default=False, + help="dump the hunk structure", + ) + parser.add_argument( + "-v", "--verbose", action="store_true", default=False, help="be more verbos" + ) + parser.add_argument( + "-s", "--stop", action="store_true", default=False, help="stop on error" + ) + parser.add_argument( + "-R", + "--show-relocs", + action="store_true", + default=False, + help="show relocation entries", + ) + parser.add_argument( + "-D", + "--show-debug", + action="store_true", + default=False, + help="show debug info entries", + ) + parser.add_argument( + "-A", + "--disassemble", + action="store_true", + default=False, + help="disassemble code segments", + ) + parser.add_argument( + "-S", + "--disassemble-start", + action="store", + type=int, + default=0, + help="start address for dissassembly", + ) + parser.add_argument( + "-x", + "--hexdump", + action="store_true", + default=False, + help="dump segments in hex", + ) + parser.add_argument( + "-b", + "--brief", + action="store_true", + default=False, + help="show only brief information", + ) + parser.add_argument( + "-B", + "--base-address", + action="store", + type=int, + default=0, + help="base address for relocation", + ) + parser.add_argument( + "-c", + "--cpu", + action="store", + default="68000", + help="disassemble for given cpu (objdump only)", + ) + args = parser.parse_args() + + cmd = args.command + if cmd not in cmd_map: + print("INVALID COMMAND:", cmd) + print("valid commands are:") + for a in cmd_map: + print(" ", a) + 
return 1 + cmd_cls = cmd_map[cmd] + + # execute command + cmd = cmd_cls(args) + res = cmd.run() + return res + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/amitools/tools/rdbtool.py b/amitools/tools/rdbtool.py index 68dd49e6..8e6d1d0d 100755 --- a/amitools/tools/rdbtool.py +++ b/amitools/tools/rdbtool.py @@ -3,8 +3,6 @@ # swiss army knife for rdb disk images or devices - - import sys import argparse import os.path @@ -25,782 +23,877 @@ # ----- commands ----- class Command: - def __init__(self, args, opts, edit=False): - self.args = args - self.opts = opts - self.edit = edit - self.exit_code = 0 - self.blkdev = None - self.rdisk = None - - def run(self, blkdev, rdisk): - # optional init blkdev function - if hasattr(self, "init_blkdev"): - if blkdev == None: - self.blkdev = self.init_blkdev(self.args.image_file) - if self.blkdev == None: - return 5 - blkdev = self.blkdev - - # optional init rdisk function - if hasattr(self, "init_rdisk"): - # close old - if rdisk != None: - rdisk.close() - # create new rdisk - self.rdisk = self.init_rdisk(blkdev) - if self.rdisk == None: - return 6 - rdisk= self.rdisk - - # common handler - if hasattr(self, 'handle_blkdev'): - return self.handle_blkdev(blkdev) - elif hasattr(self, 'handle_rdisk'): - return self.handle_rdisk(rdisk) - else: - return 0 - - def has_init_blkdev(self): - return hasattr(self, 'init_blkdev') - - def need_rdisk(self): - return hasattr(self, 'handle_rdisk') and not hasattr(self, 'init_rdisk') + def __init__(self, args, opts, edit=False): + self.args = args + self.opts = opts + self.edit = edit + self.exit_code = 0 + self.blkdev = None + self.rdisk = None + + def run(self, blkdev, rdisk): + # optional init blkdev function + if hasattr(self, "init_blkdev"): + if blkdev == None: + self.blkdev = self.init_blkdev(self.args.image_file) + if self.blkdev == None: + return 5 + blkdev = self.blkdev + + # optional init rdisk function + if hasattr(self, "init_rdisk"): + # close old + if rdisk != None: + 
rdisk.close() + # create new rdisk + self.rdisk = self.init_rdisk(blkdev) + if self.rdisk == None: + return 6 + rdisk = self.rdisk + + # common handler + if hasattr(self, "handle_blkdev"): + return self.handle_blkdev(blkdev) + elif hasattr(self, "handle_rdisk"): + return self.handle_rdisk(rdisk) + else: + return 0 + + def has_init_blkdev(self): + return hasattr(self, "init_blkdev") + + def need_rdisk(self): + return hasattr(self, "handle_rdisk") and not hasattr(self, "init_rdisk") + class FSCommandQueue(CommandQueue): - def __init__(self, args, cmd_list, sep, cmd_map): - CommandQueue.__init__(self, cmd_list, sep, cmd_map) - self.args = args - self.blkdev = None - self.rdisk = None - - def run(self): - self.img = self.args.image_file - try: - # main command loop - exit_code = CommandQueue.run(self) - except FSError as e: - cmd = "'%s'" % " ".join(self.cmd_line) - print(cmd, "FSError:", str(e)) - exit_code = 3 - except IOError as e: - cmd = "'%s'" % " ".join(self.cmd_line) - print(cmd, "IOError:", str(e)) - exit_code = 4 - finally: - # close rdisk - if self.rdisk != None: - self.rdisk.close() + def __init__(self, args, cmd_list, sep, cmd_map): + CommandQueue.__init__(self, cmd_list, sep, cmd_map) + self.args = args + self.blkdev = None + self.rdisk = None + + def run(self): + self.img = self.args.image_file + try: + # main command loop + exit_code = CommandQueue.run(self) + except FSError as e: + cmd = "'%s'" % " ".join(self.cmd_line) + print(cmd, "FSError:", str(e)) + exit_code = 3 + except IOError as e: + cmd = "'%s'" % " ".join(self.cmd_line) + print(cmd, "IOError:", str(e)) + exit_code = 4 + finally: + # close rdisk + if self.rdisk != None: + self.rdisk.close() + if self.args.verbose: + print("closing rdisk:", self.img) + # close blkdev + if self.blkdev != None: + self.blkdev.close() + if self.args.verbose: + print("closing image:", self.img) + return exit_code + + def create_cmd(self, cclass, name, opts): + return cclass(self.args, opts) + + def 
_open_rdisk(self): + if self.rdisk == None: + self.rdisk = RDisk(self.blkdev) + if self.args.verbose: + print("opening rdisk:", self.img) + return self.rdisk.open() + else: + return True + + def run_first(self, cmd_line, cmd): + self.cmd_line = cmd_line + + # check if first command is an init command + if not cmd.has_init_blkdev(): + # auto add 'open' command + pre_cmd = OpenCommand(self.args, []) + if self.args.verbose: + print("auto open command:", self.cmd_line) + exit_code = pre_cmd.run(self.blkdev, self.rdisk) + if self.args.verbose: + print("auto open exit_code:", exit_code) + if exit_code != 0: + return exit_code + self.blkdev = pre_cmd.blkdev + # setup rdisk (if necessary) + if cmd.need_rdisk(): + if not self._open_rdisk(): + raise IOError("No RDB Disk?") + + # run first command if self.args.verbose: - print("closing rdisk:", self.img) - # close blkdev - if self.blkdev != None: - self.blkdev.close() + print("command:", self.cmd_line) + if cmd.edit and self.args.read_only: + raise IOError("Edit commands not allowed in read-only mode") + + # check code of command after __init__ parsing + if cmd.exit_code != 0: + return cmd.exit_code + + # perform command + exit_code = cmd.run(self.blkdev, self.rdisk) + if cmd.blkdev != None: + self.blkdev = cmd.blkdev + if cmd.rdisk != None: + self.rdisk = cmd.rdisk + + # final exit code if self.args.verbose: - print("closing image:", self.img) - return exit_code - - def create_cmd(self, cclass, name, opts): - return cclass(self.args, opts) - - def _open_rdisk(self): - if self.rdisk == None: - self.rdisk = RDisk(self.blkdev) - if self.args.verbose: - print("opening rdisk:", self.img) - return self.rdisk.open() - else: - return True - - def run_first(self, cmd_line, cmd): - self.cmd_line = cmd_line - - # check if first command is an init command - if not cmd.has_init_blkdev(): - # auto add 'open' command - pre_cmd = OpenCommand(self.args, []) - if self.args.verbose: - print("auto open command:", self.cmd_line) - exit_code = 
pre_cmd.run(self.blkdev, self.rdisk) - if self.args.verbose: - print("auto open exit_code:", exit_code) - if exit_code != 0: + print("exit_code:", exit_code) return exit_code - self.blkdev = pre_cmd.blkdev - # setup rdisk (if necessary) - if cmd.need_rdisk(): - if not self._open_rdisk(): - raise IOError("No RDB Disk?") - - # run first command - if self.args.verbose: - print("command:", self.cmd_line) - if cmd.edit and self.args.read_only: - raise IOError("Edit commands not allowed in read-only mode") - - # check code of command after __init__ parsing - if cmd.exit_code != 0: - return cmd.exit_code - - # perform command - exit_code = cmd.run(self.blkdev, self.rdisk) - if cmd.blkdev != None: - self.blkdev = cmd.blkdev - if cmd.rdisk != None: - self.rdisk = cmd.rdisk - - # final exit code - if self.args.verbose: - print("exit_code:", exit_code) - return exit_code - - def run_next(self, cmd_line, cmd): - self.cmd_line = cmd_line - if self.args.verbose: - print("command:", self.cmd_line) - # verify command - if cmd.edit and self.args.read_only: - raise IOError("Edit commands not allowed in read-only mode") - # make sure rdisk is set up - if self.rdisk == None and cmd.need_rdisk(): - if not self._open_rdisk(): - raise IOError("No RDB Disk?") - # run command - exit_code = cmd.run(self.blkdev, self.rdisk) - if cmd.blkdev != None: - self.blkdev = cmd.blkdev - if cmd.rdisk != None: - self.rdisk = cmd.rdisk - if self.args.verbose: - print("exit_code:", exit_code) - return exit_code + + def run_next(self, cmd_line, cmd): + self.cmd_line = cmd_line + if self.args.verbose: + print("command:", self.cmd_line) + # verify command + if cmd.edit and self.args.read_only: + raise IOError("Edit commands not allowed in read-only mode") + # make sure rdisk is set up + if self.rdisk == None and cmd.need_rdisk(): + if not self._open_rdisk(): + raise IOError("No RDB Disk?") + # run command + exit_code = cmd.run(self.blkdev, self.rdisk) + if cmd.blkdev != None: + self.blkdev = cmd.blkdev + if 
cmd.rdisk != None: + self.rdisk = cmd.rdisk + if self.args.verbose: + print("exit_code:", exit_code) + return exit_code + # ----- Commands ------------------------------------------------------------- # --- Open RDISK device/image --- + class OpenCommand(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts) - def init_blkdev(self, file_name): - # make sure image file exists - if not os.path.exists(file_name): - raise IOError("Image File not found: '%s'" % file_name) - # parse opts - opts = KeyValue.parse_key_value_strings(self.opts) - # is a block size given in options? if yes then enforce it - bs = 512 - opts_bs = self._get_opts_block_size(opts) - if opts_bs: - bs = opts_bs - # setup initial raw block dev with default block size - blkdev = RawBlockDevice(file_name, self.args.read_only, block_bytes=bs) - blkdev.open() - # if no bs was given in options then try to find out block size - # from an existing rdb - if not opts_bs: - rd = RDisk(blkdev) - peek_bs = rd.peek_block_size() - # real block size differs: re-open dev with correct size - if peek_bs and peek_bs != blkdev.block_bytes: - blkdev.close() - blkdev = RawBlockDevice(file_name, self.args.read_only, - block_bytes=peek_bs) + def __init__(self, args, opts): + Command.__init__(self, args, opts) + + def init_blkdev(self, file_name): + # make sure image file exists + if not os.path.exists(file_name): + raise IOError("Image File not found: '%s'" % file_name) + # parse opts + opts = KeyValue.parse_key_value_strings(self.opts) + # is a block size given in options? 
if yes then enforce it + bs = 512 + opts_bs = self._get_opts_block_size(opts) + if opts_bs: + bs = opts_bs + # setup initial raw block dev with default block size + blkdev = RawBlockDevice(file_name, self.args.read_only, block_bytes=bs) blkdev.open() - bs = peek_bs - # try to guess geometry - file_size = blkdev.num_blocks * blkdev.block_bytes - geo = DiskGeometry(block_bytes=bs) - if not geo.detect(file_size, opts): - raise IOError("Can't detect geometry of disk: '%s'" % file_name) - # make sure block size is still the same - if geo.block_bytes != bs: - raise IOError("Invalid geo block size chosen: %d" % geo.block_bytes) - # keep geo - blkdev.geo = geo - return blkdev - - def _get_opts_block_size(self, opts): - if opts and 'bs' in opts: - bs = int(opts['bs']) - if bs % 512 != 0 or bs < 512: - raise IOError("Invalid block size given!") - return bs + # if no bs was given in options then try to find out block size + # from an existing rdb + if not opts_bs: + rd = RDisk(blkdev) + peek_bs = rd.peek_block_size() + # real block size differs: re-open dev with correct size + if peek_bs and peek_bs != blkdev.block_bytes: + blkdev.close() + blkdev = RawBlockDevice( + file_name, self.args.read_only, block_bytes=peek_bs + ) + blkdev.open() + bs = peek_bs + # try to guess geometry + file_size = blkdev.num_blocks * blkdev.block_bytes + geo = DiskGeometry(block_bytes=bs) + if not geo.detect(file_size, opts): + raise IOError("Can't detect geometry of disk: '%s'" % file_name) + # make sure block size is still the same + if geo.block_bytes != bs: + raise IOError("Invalid geo block size chosen: %d" % geo.block_bytes) + # keep geo + blkdev.geo = geo + return blkdev + + def _get_opts_block_size(self, opts): + if opts and "bs" in opts: + bs = int(opts["bs"]) + if bs % 512 != 0 or bs < 512: + raise IOError("Invalid block size given!") + return bs + # --- Create new RDISK device/image --- + class CreateCommand(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, 
edit=True) - def init_blkdev(self, file_name): - # do not overwrite an existing image file - if os.path.exists(file_name) and not self.args.force: - raise IOError("Image File already exists: '%s'" % file_name) - # make sure size is given - if len(self.opts) < 1: - print("Usage: create ( size= | chs= ) [bs=]") - return None - # determine disk geometry - opts = KeyValue.parse_key_value_strings(self.opts) - geo = DiskGeometry() - if not geo.setup(opts): - raise IOError("Can't set geometry of disk: '%s'" % file_name) - # create new empty image file for geometry - blkdev = RawBlockDevice(file_name, block_bytes=geo.block_bytes) - blkdev.create(geo.get_num_blocks()) - blkdev.geo = geo - return blkdev + def __init__(self, args, opts): + Command.__init__(self, args, opts, edit=True) + + def init_blkdev(self, file_name): + # do not overwrite an existing image file + if os.path.exists(file_name) and not self.args.force: + raise IOError("Image File already exists: '%s'" % file_name) + # make sure size is given + if len(self.opts) < 1: + print("Usage: create ( size= | chs= ) [bs=]") + return None + # determine disk geometry + opts = KeyValue.parse_key_value_strings(self.opts) + geo = DiskGeometry() + if not geo.setup(opts): + raise IOError("Can't set geometry of disk: '%s'" % file_name) + # create new empty image file for geometry + blkdev = RawBlockDevice(file_name, block_bytes=geo.block_bytes) + blkdev.create(geo.get_num_blocks()) + blkdev.geo = geo + return blkdev + # --- Init existing disk image --- + class InitCommand(OpenCommand): - def init_rdisk(self, blkdev): - opts = KeyValue.parse_key_value_strings(self.opts) - # number of cylinders for RDB - if 'rdb_cyls' in opts: - rdb_cyls = int(opts['rdb_cyls']) - else: - rdb_cyls = 1 - rdisk = RDisk(blkdev) - rdisk.create(blkdev.geo, rdb_cyls=rdb_cyls) - return rdisk + def init_rdisk(self, blkdev): + opts = KeyValue.parse_key_value_strings(self.opts) + # number of cylinders for RDB + if "rdb_cyls" in opts: + rdb_cyls = 
int(opts["rdb_cyls"]) + else: + rdb_cyls = 1 + rdisk = RDisk(blkdev) + rdisk.create(blkdev.geo, rdb_cyls=rdb_cyls) + return rdisk + # --- Info about rdisk ---- + class InfoCommand(Command): - def handle_rdisk(self, rdisk): - part_name = None - if len(self.opts) > 0: - part_name = self.opts[0] - lines = rdisk.get_info(part_name) - for l in lines: - print(l) - return 0 + def handle_rdisk(self, rdisk): + part_name = None + if len(self.opts) > 0: + part_name = self.opts[0] + lines = rdisk.get_info(part_name) + for l in lines: + print(l) + return 0 + # --- Show rdisk structures --- + class ShowCommand(Command): - def handle_rdisk(self, rdisk): - show_hex = "hex" in self.opts - rdisk.dump(show_hex) - return 0 + def handle_rdisk(self, rdisk): + show_hex = "hex" in self.opts + rdisk.dump(show_hex) + return 0 + # --- Show allocation map --- + class MapCommand(Command): - def handle_rdisk(self, rdisk): - bm = rdisk.get_block_map() - num = 0 - off = 0 - for i in bm: - if num == 0: - print("%06d: " % off, end="") - print(i, end="") - off += 1 - num += 1 - if num == 16: + def handle_rdisk(self, rdisk): + bm = rdisk.get_block_map() num = 0 - print("") - return 0 + off = 0 + for i in bm: + if num == 0: + print("%06d: " % off, end="") + print(i, end="") + off += 1 + num += 1 + if num == 16: + num = 0 + print("") + return 0 + # --- Free Partition Ranges + class FreeCommand(Command): - def handle_rdisk(self, rdisk): - ranges = rdisk.get_free_cyl_ranges() - for r in ranges: - print(r) - return 0 + def handle_rdisk(self, rdisk): + ranges = rdisk.get_free_cyl_ranges() + for r in ranges: + print(r) + return 0 + # --- Add a partition --- + class PartEditCommand(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, edit=True) - - def parse_opts(self, rdisk): - self.popts = KeyValue.parse_key_value_strings(self.opts) - self.rdisk = rdisk - - def get_dos_type(self, empty=False): - if 'fs' in self.popts: - fs_str = self.popts['fs'] - elif 'dostype' in self.popts: - 
fs_str = self.popts['dostype'] - elif not empty: - fs_str = self.args.dostype - else: - return None - return parse_dos_type_str(str(fs_str)) - - def get_drv_name(self, empty=False): - if 'name' in self.popts: - drv_name = self.popts['name'] - elif empty: - return None - else: - drv_name = "%s%d" % (self.args.drive_prefix, self.rdisk.get_num_partitions()) - return FSString(drv_name) - - def get_bootable(self, empty=False): - if 'bootable' in self.popts: - return bool(self.popts['bootable']) - elif not empty: - return False - else: - return None - - def get_boot_pri(self, empty=False): - if 'pri' in self.popts: - return self.popts['pri'] - elif not empty: - return 0 - else: - return None - - def get_automount(self, empty=False): - if 'automount' in self.popts: - return bool(self.popts['automount']) - elif not empty: - return True - else: - return None - - def get_fs_block_size(self, empty=False): - if 'bs' in self.popts: - return int(self.popts['bs']) - elif not empty: - return 512 - else: - return None - - def get_flags(self, empty=False, old_flags=0): - flags = 0 - bootable = self.get_bootable(empty=empty) - if bootable is not None: - if bootable: - flags |= PartitionBlock.FLAG_BOOTABLE - else: - flags |= (old_flags) & PartitionBlock.FLAG_BOOTABLE - automount = self.get_automount(empty=empty) - if automount is not None: - if not automount: - flags |= PartitionBlock.FLAG_NO_AUTOMOUNT - else: - flags |= (old_flags) & PartitionBlock.FLAG_NO_AUTOMOUNT - return flags - - def get_more_dos_env(self): - more_dos_env = [] - valid_keys = PartitionDosEnv.valid_keys - for key in self.popts: - if key in valid_keys: - more_dos_env.append((key, self.popts[key])) - if len(more_dos_env) > 0: - return more_dos_env - else: - return None - - def get_more_dos_env_info(self): - valid_keys = PartitionDosEnv.valid_keys - info = ["[%s=]" % x for x in valid_keys] - return " ".join(info) - - def get_cyl_range(self): - start = None - if 'start' in self.popts: - start = 
int(self.popts['start']) - # range with start= end= - if 'end' in self.popts: - end = int(self.popts['end']) - if start == None or end <= start: - return None - else: - return (start, end) - # expect a size - elif 'size' in self.popts: - size = self.popts['size'] - cyls = None - if type(size) == int: - cyls = size - # size in bytes - elif size[-1] in ('b','B'): - num_bytes = ByteSize.parse_byte_size_str(size[:-1]) - if num_bytes == None: - return None - cyls = num_bytes // self.rdisk.get_cylinder_bytes() - # size in percent - elif size[-1] == '%': - prc = float(size[:-1]) - cyls = int(prc * self.rdisk.get_logical_cylinders() / 100.0) - # size in cylinders - else: - cyls = ByteSize.parse_byte_size_str(size) - - # check cyls - if cyls == None or cyls < 1: - return None - # find a range if no start is given - if start == None: - start = self.rdisk.find_free_cyl_range_start(cyls) - if start == None: - return None - return (start, start + cyls - 1) - # nothing specified -> get next free range - else: - ranges = self.rdisk.get_free_cyl_ranges() - if ranges == None: - return None - return ranges[0] + def __init__(self, args, opts): + Command.__init__(self, args, opts, edit=True) + + def parse_opts(self, rdisk): + self.popts = KeyValue.parse_key_value_strings(self.opts) + self.rdisk = rdisk + + def get_dos_type(self, empty=False): + if "fs" in self.popts: + fs_str = self.popts["fs"] + elif "dostype" in self.popts: + fs_str = self.popts["dostype"] + elif not empty: + fs_str = self.args.dostype + else: + return None + return parse_dos_type_str(str(fs_str)) + + def get_drv_name(self, empty=False): + if "name" in self.popts: + drv_name = self.popts["name"] + elif empty: + return None + else: + drv_name = "%s%d" % ( + self.args.drive_prefix, + self.rdisk.get_num_partitions(), + ) + return FSString(drv_name) + + def get_bootable(self, empty=False): + if "bootable" in self.popts: + return bool(self.popts["bootable"]) + elif not empty: + return False + else: + return None -class 
AddCommand(PartEditCommand): - def handle_rdisk(self, rdisk): - self.parse_opts(rdisk) - lo_hi = self.get_cyl_range() - if lo_hi == None: - print("ERROR: invalid partition range given!") - return 1 - dostype = self.get_dos_type() - if dostype == None: - print("ERROR: invalid dos type!") - return 1 - drv_name = self.get_drv_name() - if drv_name == None: - print("ERROR: invalid drive name!") - flags = self.get_flags() - boot_pri = self.get_boot_pri() - more_dos_env = self.get_more_dos_env() - fs_bs = self.get_fs_block_size(empty=True) - print("creating: '%s' %s %s" % (drv_name, lo_hi, num_to_tag_str(dostype))) - # add partition - if rdisk.add_partition(drv_name, lo_hi, dos_type=dostype, flags=flags, - boot_pri=boot_pri, more_dos_env=more_dos_env, - fs_block_size=fs_bs): - return 0 - else: - print("ERROR: creating partition: '%s': %s" % (drv_name, lo_hi)) - return 1 + def get_boot_pri(self, empty=False): + if "pri" in self.popts: + return self.popts["pri"] + elif not empty: + return 0 + else: + return None -class ChangeCommand(PartEditCommand): - def handle_rdisk(self, rdisk): - if len(self.opts) < 1: - print("Usage: change [name=] [dostype=] [automount=] [bootable=] [pri=] " + self.get_more_dos_env_info()) - return 1 - else: - p = rdisk.find_partition_by_string(self.opts[0]) - if p != None: + def get_automount(self, empty=False): + if "automount" in self.popts: + return bool(self.popts["automount"]) + elif not empty: + return True + else: + return None + + def get_fs_block_size(self, empty=False): + if "bs" in self.popts: + return int(self.popts["bs"]) + elif not empty: + return 512 + else: + return None + + def get_flags(self, empty=False, old_flags=0): + flags = 0 + bootable = self.get_bootable(empty=empty) + if bootable is not None: + if bootable: + flags |= PartitionBlock.FLAG_BOOTABLE + else: + flags |= (old_flags) & PartitionBlock.FLAG_BOOTABLE + automount = self.get_automount(empty=empty) + if automount is not None: + if not automount: + flags |= 
PartitionBlock.FLAG_NO_AUTOMOUNT + else: + flags |= (old_flags) & PartitionBlock.FLAG_NO_AUTOMOUNT + return flags + + def get_more_dos_env(self): + more_dos_env = [] + valid_keys = PartitionDosEnv.valid_keys + for key in self.popts: + if key in valid_keys: + more_dos_env.append((key, self.popts[key])) + if len(more_dos_env) > 0: + return more_dos_env + else: + return None + + def get_more_dos_env_info(self): + valid_keys = PartitionDosEnv.valid_keys + info = ["[%s=]" % x for x in valid_keys] + return " ".join(info) + + def get_cyl_range(self): + start = None + if "start" in self.popts: + start = int(self.popts["start"]) + # range with start= end= + if "end" in self.popts: + end = int(self.popts["end"]) + if start == None or end <= start: + return None + else: + return (start, end) + # expect a size + elif "size" in self.popts: + size = self.popts["size"] + cyls = None + if type(size) == int: + cyls = size + # size in bytes + elif size[-1] in ("b", "B"): + num_bytes = ByteSize.parse_byte_size_str(size[:-1]) + if num_bytes == None: + return None + cyls = num_bytes // self.rdisk.get_cylinder_bytes() + # size in percent + elif size[-1] == "%": + prc = float(size[:-1]) + cyls = int(prc * self.rdisk.get_logical_cylinders() / 100.0) + # size in cylinders + else: + cyls = ByteSize.parse_byte_size_str(size) + + # check cyls + if cyls == None or cyls < 1: + return None + # find a range if no start is given + if start == None: + start = self.rdisk.find_free_cyl_range_start(cyls) + if start == None: + return None + return (start, start + cyls - 1) + # nothing specified -> get next free range + else: + ranges = self.rdisk.get_free_cyl_ranges() + if ranges == None: + return None + return ranges[0] + + +class AddCommand(PartEditCommand): + def handle_rdisk(self, rdisk): self.parse_opts(rdisk) - dostype = self.get_dos_type(empty=True) - drv_name = self.get_drv_name(empty=True) - flags = self.get_flags(empty=True, old_flags=p.get_flags()) - boot_pri = self.get_boot_pri(empty=True) 
- fs_bs = self.get_fs_block_size(empty=True) + lo_hi = self.get_cyl_range() + if lo_hi == None: + print("ERROR: invalid partition range given!") + return 1 + dostype = self.get_dos_type() + if dostype == None: + print("ERROR: invalid dos type!") + return 1 + drv_name = self.get_drv_name() + if drv_name == None: + print("ERROR: invalid drive name!") + flags = self.get_flags() + boot_pri = self.get_boot_pri() more_dos_env = self.get_more_dos_env() - # change partition - if rdisk.change_partition(p.num, drv_name=drv_name, dos_type=dostype, - flags=flags, boot_pri=boot_pri, - more_dos_env=more_dos_env, - fs_block_size=fs_bs): - return 0 + fs_bs = self.get_fs_block_size(empty=True) + print("creating: '%s' %s %s" % (drv_name, lo_hi, num_to_tag_str(dostype))) + # add partition + if rdisk.add_partition( + drv_name, + lo_hi, + dos_type=dostype, + flags=flags, + boot_pri=boot_pri, + more_dos_env=more_dos_env, + fs_block_size=fs_bs, + ): + return 0 + else: + print("ERROR: creating partition: '%s': %s" % (drv_name, lo_hi)) + return 1 + + +class ChangeCommand(PartEditCommand): + def handle_rdisk(self, rdisk): + if len(self.opts) < 1: + print( + "Usage: change [name=] [dostype=] [automount=] [bootable=] [pri=] " + + self.get_more_dos_env_info() + ) + return 1 else: - print("ERROR: changing partition: '%s'" % (drv_name)) - return 1 - else: - print("Can't find partition: '%s'" % self.opts[0]) - return 1 + p = rdisk.find_partition_by_string(self.opts[0]) + if p != None: + self.parse_opts(rdisk) + dostype = self.get_dos_type(empty=True) + drv_name = self.get_drv_name(empty=True) + flags = self.get_flags(empty=True, old_flags=p.get_flags()) + boot_pri = self.get_boot_pri(empty=True) + fs_bs = self.get_fs_block_size(empty=True) + more_dos_env = self.get_more_dos_env() + # change partition + if rdisk.change_partition( + p.num, + drv_name=drv_name, + dos_type=dostype, + flags=flags, + boot_pri=boot_pri, + more_dos_env=more_dos_env, + fs_block_size=fs_bs, + ): + return 0 + else: + 
print("ERROR: changing partition: '%s'" % (drv_name)) + return 1 + else: + print("Can't find partition: '%s'" % self.opts[0]) + return 1 + # --- Export/Import file system image --- + class ExportCommand(Command): - def handle_rdisk(self, rdisk): - if len(self.opts) < 2: - print("Usage: export ") - return 1 - else: - part = self.opts[0] - file_name = self.opts[1] - p = rdisk.find_partition_by_string(part) - if p != None: - blkdev = p.create_blkdev() - blkdev.open() - num_blks = blkdev.num_blocks - print("exporting '%s' (%d blocks) to '%s'" % \ - (p.get_drive_name(), num_blks, file_name)) - try: - with open(file_name, "wb") as fh: - for b in range(num_blks): - data = blkdev.read_block(b) - fh.write(data) - except IOError as e: - print("Error writing file: '%s': %s" % (file_name, e)) - return 1 - blkdev.close() - return 0 - else: - print("Can't find partition: '%s'" % part) - return 1 + def handle_rdisk(self, rdisk): + if len(self.opts) < 2: + print("Usage: export ") + return 1 + else: + part = self.opts[0] + file_name = self.opts[1] + p = rdisk.find_partition_by_string(part) + if p != None: + blkdev = p.create_blkdev() + blkdev.open() + num_blks = blkdev.num_blocks + print( + "exporting '%s' (%d blocks) to '%s'" + % (p.get_drive_name(), num_blks, file_name) + ) + try: + with open(file_name, "wb") as fh: + for b in range(num_blks): + data = blkdev.read_block(b) + fh.write(data) + except IOError as e: + print("Error writing file: '%s': %s" % (file_name, e)) + return 1 + blkdev.close() + return 0 + else: + print("Can't find partition: '%s'" % part) + return 1 + class ImportCommand(Command): - def handle_rdisk(self, rdisk): - if len(self.opts) < 2: - print("Usage: import ") - return 1 - else: - part = self.opts[0] - file_name = self.opts[1] - p = rdisk.find_partition_by_string(part) - if p != None: - part_dev = p.create_blkdev() - part_dev.open() - part_blks = part_dev.num_blocks - blk_size = part_dev.block_bytes - total = part_blks * blk_size - # open image - file_size 
= os.path.getsize(file_name) - file_blks = file_size // blk_size - if file_size % blk_size != 0: - print("image file not block size aligned!") - return 1 - # check sizes - if total < file_size: - print("import image too large: partition=%d != file=%d", - total, file_size) - return 1 - if total > file_size: - delta = total - file_size - print("WARNING: import file too small: %d unused blocks", delta) - print("importing '%s' (%d blocks) to '%s' (%d blocks)" % \ - (file_name, file_blks, p.get_drive_name(), part_blks)) - # copy image - with open(file_name, "rb") as fh: - for b in range(file_blks): - data = fh.read(blk_size) - part_dev.write_block(b, data) - part_dev.close() - return 0 - else: - print("Can't find partition: '%s'" % part) - return 1 + def handle_rdisk(self, rdisk): + if len(self.opts) < 2: + print("Usage: import ") + return 1 + else: + part = self.opts[0] + file_name = self.opts[1] + p = rdisk.find_partition_by_string(part) + if p != None: + part_dev = p.create_blkdev() + part_dev.open() + part_blks = part_dev.num_blocks + blk_size = part_dev.block_bytes + total = part_blks * blk_size + # open image + file_size = os.path.getsize(file_name) + file_blks = file_size // blk_size + if file_size % blk_size != 0: + print("image file not block size aligned!") + return 1 + # check sizes + if total < file_size: + print( + "import image too large: partition=%d != file=%d", + total, + file_size, + ) + return 1 + if total > file_size: + delta = total - file_size + print("WARNING: import file too small: %d unused blocks", delta) + print( + "importing '%s' (%d blocks) to '%s' (%d blocks)" + % (file_name, file_blks, p.get_drive_name(), part_blks) + ) + # copy image + with open(file_name, "rb") as fh: + for b in range(file_blks): + data = fh.read(blk_size) + part_dev.write_block(b, data) + part_dev.close() + return 0 + else: + print("Can't find partition: '%s'" % part) + return 1 + # --- Fill empty space with partitions --- + class FillCommand(PartEditCommand): - def 
handle_rdisk(self, rdisk): - self.parse_opts(rdisk) - ranges = rdisk.get_free_cyl_ranges() - # nothing to do - if ranges == None: - return 0 - for lo_hi in ranges: - drv_name = self.get_drv_name() - if drv_name == None: - print("ERROR: invalid drive name!") - dostype = self.get_dos_type() - if dostype == None: - print("ERROR: invalid dostype given!") - return 1 - flags = self.get_flags() - boot_pri = self.get_boot_pri() - more_dos_env = self.get_more_dos_env() - fs_bs = self.get_fs_block_size(empty=True) - print("creating: '%s' %s %s" % (drv_name, lo_hi, num_to_tag_str(dostype))) - # add partition - if not rdisk.add_partition(drv_name, lo_hi, dos_type=dostype, flags=flags, - boot_pri=boot_pri, more_dos_env=more_dos_env, - fs_block_size=fs_bs): - print("ERROR: creating partition: '%s': %s" % (drv_name, lo_hi)) - return 1 - return 0 + def handle_rdisk(self, rdisk): + self.parse_opts(rdisk) + ranges = rdisk.get_free_cyl_ranges() + # nothing to do + if ranges == None: + return 0 + for lo_hi in ranges: + drv_name = self.get_drv_name() + if drv_name == None: + print("ERROR: invalid drive name!") + dostype = self.get_dos_type() + if dostype == None: + print("ERROR: invalid dostype given!") + return 1 + flags = self.get_flags() + boot_pri = self.get_boot_pri() + more_dos_env = self.get_more_dos_env() + fs_bs = self.get_fs_block_size(empty=True) + print("creating: '%s' %s %s" % (drv_name, lo_hi, num_to_tag_str(dostype))) + # add partition + if not rdisk.add_partition( + drv_name, + lo_hi, + dos_type=dostype, + flags=flags, + boot_pri=boot_pri, + more_dos_env=more_dos_env, + fs_block_size=fs_bs, + ): + print("ERROR: creating partition: '%s': %s" % (drv_name, lo_hi)) + return 1 + return 0 + # --- Delete partition command --- + class DeleteCommand(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, edit=True) - - def handle_rdisk(self, rdisk): - if len(self.opts) < 1: - print("Usage: delete ") - return 1 - else: - p = 
rdisk.find_partition_by_string(self.opts[0]) - if p != None: - if not rdisk.delete_partition(p.num): - print("ERROR: deleting partition: '%s'" % self.opts[0]) - return 1 + def __init__(self, args, opts): + Command.__init__(self, args, opts, edit=True) + + def handle_rdisk(self, rdisk): + if len(self.opts) < 1: + print("Usage: delete ") + return 1 else: - return 0 - else: - print("Can't find partition: '%s'" % self.opts[0]) - return 1 + p = rdisk.find_partition_by_string(self.opts[0]) + if p != None: + if not rdisk.delete_partition(p.num): + print("ERROR: deleting partition: '%s'" % self.opts[0]) + return 1 + else: + return 0 + else: + print("Can't find partition: '%s'" % self.opts[0]) + return 1 + # --- Filesystem Commands --- + class FSGetCommand(Command): - def handle_rdisk(self, rdisk): - if len(self.opts) < 2: - print("Usage: fsget ") - return 1 - else: - num = int(self.opts[0]) - fs = rdisk.get_filesystem(num) - if fs == None: - print("fsget: invalid filesystem index",num) - return 1 - else: - file_name = self.opts[1] - data = fs.get_data() - f = open(file_name,"wb") - f.write(data) - f.close() - return 0 + def handle_rdisk(self, rdisk): + if len(self.opts) < 2: + print("Usage: fsget ") + return 1 + else: + num = int(self.opts[0]) + fs = rdisk.get_filesystem(num) + if fs == None: + print("fsget: invalid filesystem index", num) + return 1 + else: + file_name = self.opts[1] + data = fs.get_data() + f = open(file_name, "wb") + f.write(data) + f.close() + return 0 + class FSAddCommand(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, edit=True) - - def parse_opts(self): - self.popts = KeyValue.parse_key_value_strings(self.opts) - - def get_dos_type(self): - if 'fs' in self.popts: - fs_str = self.popts['fs'] - elif 'dostype' in self.popts: - fs_str = self.popts['dostype'] - else: - fs_str = self.args.dostype - return parse_dos_type_str(str(fs_str)) - - def handle_rdisk(self, rdisk): - self.parse_opts() - valid_flags = 
FSHeaderDeviceNode.valid_flags - if len(self.opts) < 1: - flag_info = ["[%s=]" % x for x in valid_flags] - flag_info = " ".join(flag_info) - print("Usage: fsadd [dostype=] [version=] " + flag_info) - return 1 - else: - # parse options - opts = KeyValue.parse_key_value_strings(self.opts) - # read file data - file_name = self.opts[0] - f = open(file_name,"rb") - data = f.read() - f.close() - # get version from binary - tag = VerTag.find(data) - ver = None - if tag != None: - ver = VerTag.get_version(tag) - if ver == None: - ver = (0,0) - # overwrite version from options - if 'version' in opts: - vstr = opts['version'] - pos = vstr.find('.') - if pos != -1: - ver = (int(vstr[:pos]),int(vstr[pos+1:])) - # valid fs flags - dev_flags = [] - for key in opts: - if key in valid_flags: - dev_flags.append((key,opts[key])) - # add fs - version = ver[0] << 16 | ver[1] - # get dostype - dostype = self.get_dos_type() - if rdisk.add_filesystem(data, dos_type=dostype, version=version, dev_flags=dev_flags): - return 0 - else: - print("ERROR adding filesystem! 
(no space in RDB left)") - return 1 + def __init__(self, args, opts): + Command.__init__(self, args, opts, edit=True) + + def parse_opts(self): + self.popts = KeyValue.parse_key_value_strings(self.opts) + + def get_dos_type(self): + if "fs" in self.popts: + fs_str = self.popts["fs"] + elif "dostype" in self.popts: + fs_str = self.popts["dostype"] + else: + fs_str = self.args.dostype + return parse_dos_type_str(str(fs_str)) + + def handle_rdisk(self, rdisk): + self.parse_opts() + valid_flags = FSHeaderDeviceNode.valid_flags + if len(self.opts) < 1: + flag_info = ["[%s=]" % x for x in valid_flags] + flag_info = " ".join(flag_info) + print( + "Usage: fsadd [dostype=] [version=] " + + flag_info + ) + return 1 + else: + # parse options + opts = KeyValue.parse_key_value_strings(self.opts) + # read file data + file_name = self.opts[0] + f = open(file_name, "rb") + data = f.read() + f.close() + # get version from binary + tag = VerTag.find(data) + ver = None + if tag != None: + ver = VerTag.get_version(tag) + if ver == None: + ver = (0, 0) + # overwrite version from options + if "version" in opts: + vstr = opts["version"] + pos = vstr.find(".") + if pos != -1: + ver = (int(vstr[:pos]), int(vstr[pos + 1 :])) + # valid fs flags + dev_flags = [] + for key in opts: + if key in valid_flags: + dev_flags.append((key, opts[key])) + # add fs + version = ver[0] << 16 | ver[1] + # get dostype + dostype = self.get_dos_type() + if rdisk.add_filesystem( + data, dos_type=dostype, version=version, dev_flags=dev_flags + ): + return 0 + else: + print("ERROR adding filesystem! 
(no space in RDB left)") + return 1 + class FSDeleteCommand(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, edit=True) - - def handle_rdisk(self, rdisk): - if len(self.opts) < 1: - print("Usage: fsdelete ") - return 1 - else: - fs = rdisk.find_filesystem_by_string(self.opts[0]) - if fs != None: - if not rdisk.delete_filesystem(fs.num): - print("ERROR deleting filesystem: '%s'" % self.opts[0]) - return 1 + def __init__(self, args, opts): + Command.__init__(self, args, opts, edit=True) + + def handle_rdisk(self, rdisk): + if len(self.opts) < 1: + print("Usage: fsdelete ") + return 1 else: - return 0 - else: - print("ERROR finding filesystem: '%s'" % self.opts[0]) - return 1 + fs = rdisk.find_filesystem_by_string(self.opts[0]) + if fs != None: + if not rdisk.delete_filesystem(fs.num): + print("ERROR deleting filesystem: '%s'" % self.opts[0]) + return 1 + else: + return 0 + else: + print("ERROR finding filesystem: '%s'" % self.opts[0]) + return 1 + class FSFlagsCommand(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, edit=True) - - def handle_rdisk(self, rdisk): - if len(self.opts) < 2: - print("Usage: fsflags [ clear | key= ... ]") - return 1 - else: - fs = rdisk.find_filesystem_by_string(self.opts[0]) - if fs != None: - opts = KeyValue.parse_key_value_strings(self.opts[1:]) - valid_flags = fs.get_valid_flag_names() - flags = [] - clear = False - for o in opts: - if o in valid_flags: - flags.append((o,opts[o])) - elif o == 'clear': - clear = True - fs.set_flags(flags, clear) - return 0 - else: - print("ERROR finding filesystem: '%s'" % self.opts[0]) - return 1 + def __init__(self, args, opts): + Command.__init__(self, args, opts, edit=True) + + def handle_rdisk(self, rdisk): + if len(self.opts) < 2: + print("Usage: fsflags [ clear | key= ... 
]") + return 1 + else: + fs = rdisk.find_filesystem_by_string(self.opts[0]) + if fs != None: + opts = KeyValue.parse_key_value_strings(self.opts[1:]) + valid_flags = fs.get_valid_flag_names() + flags = [] + clear = False + for o in opts: + if o in valid_flags: + flags.append((o, opts[o])) + elif o == "clear": + clear = True + fs.set_flags(flags, clear) + return 0 + else: + print("ERROR finding filesystem: '%s'" % self.opts[0]) + return 1 + # ----- main ----- def main(): - # call scanner and process all files with selected command - cmd_map = { - "open" : OpenCommand, - "create" : CreateCommand, - "init" : InitCommand, - "info" : InfoCommand, - "show" : ShowCommand, - "free" : FreeCommand, - "add" : AddCommand, - "fill" : FillCommand, - "fsget" : FSGetCommand, - "fsadd" : FSAddCommand, - "fsdelete" : FSDeleteCommand, - "fsflags" : FSFlagsCommand, - "map" : MapCommand, - "delete" : DeleteCommand, - "change" : ChangeCommand, - "export" : ExportCommand, - "import" : ImportCommand - } - - parser = argparse.ArgumentParser() - parser.add_argument('image_file') - parser.add_argument('command_list', nargs='+', help="command: "+",".join(list(cmd_map.keys()))) - parser.add_argument('-v', '--verbose', action='store_true', default=False, help="be more verbos") - parser.add_argument('-s', '--seperator', default='+', help="set the command separator char sequence") - parser.add_argument('-r', '--read-only', action='store_true', default=False, help="read-only operation") - parser.add_argument('-f', '--force', action='store_true', default=False, help="force overwrite existing image") - parser.add_argument('-p', '--drive-prefix', default='DH', help="set default drive name prefix (DH -> DH0, DH1, ...)") - parser.add_argument('-t', '--dostype', default='ffs+intl', help="set default dos type") - args = parser.parse_args() - - cmd_list = args.command_list - sep = args.seperator - queue = FSCommandQueue(args, cmd_list, sep, cmd_map) - code = queue.run() - return code - - -if __name__ == 
'__main__': - sys.exit(main()) + # call scanner and process all files with selected command + cmd_map = { + "open": OpenCommand, + "create": CreateCommand, + "init": InitCommand, + "info": InfoCommand, + "show": ShowCommand, + "free": FreeCommand, + "add": AddCommand, + "fill": FillCommand, + "fsget": FSGetCommand, + "fsadd": FSAddCommand, + "fsdelete": FSDeleteCommand, + "fsflags": FSFlagsCommand, + "map": MapCommand, + "delete": DeleteCommand, + "change": ChangeCommand, + "export": ExportCommand, + "import": ImportCommand, + } + + parser = argparse.ArgumentParser() + parser.add_argument("image_file") + parser.add_argument( + "command_list", nargs="+", help="command: " + ",".join(list(cmd_map.keys())) + ) + parser.add_argument( + "-v", "--verbose", action="store_true", default=False, help="be more verbos" + ) + parser.add_argument( + "-s", "--seperator", default="+", help="set the command separator char sequence" + ) + parser.add_argument( + "-r", + "--read-only", + action="store_true", + default=False, + help="read-only operation", + ) + parser.add_argument( + "-f", + "--force", + action="store_true", + default=False, + help="force overwrite existing image", + ) + parser.add_argument( + "-p", + "--drive-prefix", + default="DH", + help="set default drive name prefix (DH -> DH0, DH1, ...)", + ) + parser.add_argument( + "-t", "--dostype", default="ffs+intl", help="set default dos type" + ) + args = parser.parse_args() + + cmd_list = args.command_list + sep = args.seperator + queue = FSCommandQueue(args, cmd_list, sep, cmd_map) + code = queue.run() + return code + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/amitools/tools/romtool.py b/amitools/tools/romtool.py index c1b9f214..6cb92ee0 100644 --- a/amitools/tools/romtool.py +++ b/amitools/tools/romtool.py @@ -72,8 +72,7 @@ def do_split_cmd(args): logging.info("writing index to '%s'", idx_path) rs.write_index_file(idx_path) # extract entries - logging.debug("extract module: fixes=%s, patches=%s", - 
args.fixes, args.patches) + logging.debug("extract module: fixes=%s, patches=%s", args.fixes, args.patches) bfh = BinFmtHunk() for e in entries: rs.print_entry(logging.info, e) @@ -97,18 +96,31 @@ def do_build_cmd(args): rom_rev = list(map(int, rom_rev.split("."))) add_footer = args.add_footer # select rom builder - if rom_type == 'kick': + if rom_type == "kick": logging.info("building %d KiB Kick ROM @%08x", rom_size, kick_addr) - rb = rom.KickRomBuilder(rom_size, - base_addr=kick_addr, fill_byte=fill_byte, - kickety_split=kickety_split, rom_ver=rom_rev) - elif rom_type == 'ext': - logging.info("building %d KiB Ext ROM @%08x Rev %r for Kick @%08x", - rom_size, ext_addr, rom_rev, kick_addr) - rb = rom.ExtRomBuilder(rom_size, - base_addr=ext_addr, fill_byte=fill_byte, - add_footer=add_footer, rom_ver=rom_rev, - kick_addr=kick_addr) + rb = rom.KickRomBuilder( + rom_size, + base_addr=kick_addr, + fill_byte=fill_byte, + kickety_split=kickety_split, + rom_ver=rom_rev, + ) + elif rom_type == "ext": + logging.info( + "building %d KiB Ext ROM @%08x Rev %r for Kick @%08x", + rom_size, + ext_addr, + rom_rev, + kick_addr, + ) + rb = rom.ExtRomBuilder( + rom_size, + base_addr=ext_addr, + fill_byte=fill_byte, + add_footer=add_footer, + rom_ver=rom_rev, + kick_addr=kick_addr, + ) else: logging.error("Unknown rom_type=%s", rom_type) return 1 @@ -130,8 +142,7 @@ def do_build_cmd(args): if bkm.get_type() == "module": bkm.fix_module() elif bkm.get_type() == "patch": - logging.error("BlizKick Patches are not supported, yet: %s", - name) + logging.error("BlizKick Patches are not supported, yet: %s", name) return 5 # get image params size = bin_img.get_size() @@ -160,16 +171,18 @@ def do_build_cmd(args): logging.info("@%08x: adding module '%s'", off, f) e = rb.add_bin_img(name, bin_img) if e is None: - logging.error("@%08x: can't add module '%s': %s", - off, f, rb.get_error()) + logging.error( + "@%08x: can't add module '%s': %s", off, f, rb.get_error() + ) return 3 # add data else: 
logging.info("@%08x: adding raw data '%s'", off, f) e = rb.add_module(name, data) if e is None: - logging.error("@%08x: can't add raw data '%s': %s", - off, f, rb.get_error()) + logging.error( + "@%08x: can't add raw data '%s': %s", off, f, rb.get_error() + ) return 3 # add long word padding? @@ -178,14 +191,14 @@ def do_build_cmd(args): logging.info("@%08x: adding padding: +%d" % (off, padding)) e = rb.add_padding(padding) if e is None: - logging.error("@%08x: can't add padding: %s", - off, rb.get_error()) + logging.error("@%08x: can't add padding: %s", off, rb.get_error()) return 3 # build rom off = rb.get_rom_offset() - logging.info("@%08x: padding %d bytes with %02x", - off, rb.get_bytes_left(), fill_byte) + logging.info( + "@%08x: padding %d bytes with %02x", off, rb.get_bytes_left(), fill_byte + ) rom_data = rb.build_rom() if rom_data is None: logging.error("building ROM failed: %s", rb.get_error()) @@ -212,8 +225,7 @@ def do_diff_cmd(args): size_a = len(rom_a) size_b = len(rom_b) if not args.force and size_a != size_b: - logging.error( - "ROM differ in size (%08x != %08x). Aborting", size_a, size_b) + logging.error("ROM differ in size (%08x != %08x). Aborting", size_a, size_b) return 2 # do diff base_addr = 0 @@ -226,8 +238,9 @@ def do_diff_cmd(args): else: logging.error("Not a KickROM! 
Can't detect base address.") return 3 - print_hex_diff(rom_a, rom_b, num=args.columns, show_same=args.same, - base_addr=base_addr) + print_hex_diff( + rom_a, rom_b, num=args.columns, show_same=args.same, base_addr=base_addr + ) def do_dump_cmd(args): @@ -266,24 +279,24 @@ def do_info_cmd(args): rom_img = rom.Loader.load(img) kh = rom.KickRomAccess(rom_img) checks = [ - ('size', kh.check_size()), - ('header', kh.check_header()), - ('footer', kh.check_footer()), - ('size_field', kh.check_size()), - ('chk_sum', kh.verify_check_sum()), - ('kickety_split', kh.check_kickety_split()), - ('magic_reset', kh.check_magic_reset()), - ('is_kick', kh.is_kick_rom()) + ("size", kh.check_size()), + ("header", kh.check_header()), + ("footer", kh.check_footer()), + ("size_field", kh.check_size()), + ("chk_sum", kh.verify_check_sum()), + ("kickety_split", kh.check_kickety_split()), + ("magic_reset", kh.check_magic_reset()), + ("is_kick", kh.is_kick_rom()), ] c = ["%-20s %s" % (x[0], "ok" if x[1] else "NOK") for x in checks] for i in c: print(i) values = [ - ('check_sum', '%08x', kh.read_check_sum()), - ('base_addr', '%08x', kh.get_base_addr()), - ('boot_pc', '%08x', kh.read_boot_pc()), - ('rom_rev', '%d.%d', kh.read_rom_ver_rev()), - ('exec_rev', '%d.%d', kh.read_exec_ver_rev()) + ("check_sum", "%08x", kh.read_check_sum()), + ("base_addr", "%08x", kh.get_base_addr()), + ("boot_pc", "%08x", kh.read_boot_pc()), + ("rom_rev", "%d.%d", kh.read_rom_ver_rev()), + ("exec_rev", "%d.%d", kh.read_exec_ver_rev()), ] v = ["%-20s %s" % (x[0], x[1] % x[2]) for x in values] for i in v: @@ -354,7 +367,7 @@ def do_combine_cmd(args): if ka.get_size_kib() != 512: logging.error("Not a 512 MiB Kick ROM image!") return 2 - if ka.get_base_addr() != 0xf80000: + if ka.get_base_addr() != 0xF80000: logging.error("Kick ROM base address is not 0xf80000!") return 3 # check ext @@ -423,112 +436,176 @@ def do_scan_cmd(args): print(spc, "init off: %08x" % r.init_off) print(spc, "skip off: %08x" % r.skip_off) else: - 
print("@%08x +%08x %-12s %+4d %s %s" % - (off, r.skip_off, nt, r.pri, name, id_string)) + print( + "@%08x +%08x %-12s %+4d %s %s" + % (off, r.skip_off, nt, r.pri, name, id_string) + ) def setup_list_parser(parser): - parser.add_argument('-r', '--rom', default=None, - help='query rom name by wildcard') - parser.add_argument('-m', '--modules', default=False, action='store_true', - help="show entries of ROMs") + parser.add_argument("-r", "--rom", default=None, help="query rom name by wildcard") + parser.add_argument( + "-m", + "--modules", + default=False, + action="store_true", + help="show entries of ROMs", + ) parser.set_defaults(cmd=do_list_cmd) def setup_query_parser(parser): - parser.add_argument('rom_image', - help='rom image to be checked') - parser.add_argument('-m', '--modules', default=None, - help='query module by wildcard') + parser.add_argument("rom_image", help="rom image to be checked") + parser.add_argument( + "-m", "--modules", default=None, help="query module by wildcard" + ) parser.set_defaults(cmd=do_query_cmd) def setup_split_parser(parser): - parser.add_argument('rom_image', - help='rom image file to be split') - parser.add_argument('-o', '--output-dir', - help='store modules in this base dir') - parser.add_argument('-m', '--modules', default=None, - help='query module by wildcard') - parser.add_argument('--no-version-dir', default=False, action='store_true', - help="do not create sub directory with version name") - parser.add_argument('--no-index', default=False, action='store_true', - help="do not create an 'index.txt' in output path") - parser.add_argument('-p', '--patches', default=False, action='store_true', - help='apply optional patches to modules') - parser.add_argument('-f', '--no-fixes', dest='fixes', default=True, - action='store_false', - help='do not apply available fixes to modules') + parser.add_argument("rom_image", help="rom image file to be split") + parser.add_argument("-o", "--output-dir", help="store modules in this base 
dir") + parser.add_argument( + "-m", "--modules", default=None, help="query module by wildcard" + ) + parser.add_argument( + "--no-version-dir", + default=False, + action="store_true", + help="do not create sub directory with version name", + ) + parser.add_argument( + "--no-index", + default=False, + action="store_true", + help="do not create an 'index.txt' in output path", + ) + parser.add_argument( + "-p", + "--patches", + default=False, + action="store_true", + help="apply optional patches to modules", + ) + parser.add_argument( + "-f", + "--no-fixes", + dest="fixes", + default=True, + action="store_false", + help="do not apply available fixes to modules", + ) parser.set_defaults(cmd=do_split_cmd) def setup_build_parser(parser): - parser.add_argument('modules', default=None, nargs='+', - help='modules or index.txt files to be added') - parser.add_argument('-o', '--output', - help='rom image file to be built') - parser.add_argument('-t', '--rom-type', default='kick', - help="what type of ROM to build (kick, ext)") - parser.add_argument('-s', '--rom-size', default=512, type=int, - help="size of ROM in KiB") - parser.add_argument('-a', '--kick-addr', default="f80000", - help="base address of Kick ROM in hex") - parser.add_argument('-e', '--ext-addr', default="e00000", - help="base address of Ext ROM in hex") - parser.add_argument('-f', '--add-footer', default=False, - action='store_true', - help="add footer with check sum to Ext ROM") - parser.add_argument('-r', '--rom-rev', default=None, - help="set ROM revision, e.g. 
45.10") - parser.add_argument('-k', '--kickety_split', default=False, - action='store_true', - help="add 'kickety split' romhdr at center of ROM") - parser.add_argument('-b', '--fill-byte', default='ff', - help="fill byte in hex for empty ranges") + parser.add_argument( + "modules", + default=None, + nargs="+", + help="modules or index.txt files to be added", + ) + parser.add_argument("-o", "--output", help="rom image file to be built") + parser.add_argument( + "-t", "--rom-type", default="kick", help="what type of ROM to build (kick, ext)" + ) + parser.add_argument( + "-s", "--rom-size", default=512, type=int, help="size of ROM in KiB" + ) + parser.add_argument( + "-a", "--kick-addr", default="f80000", help="base address of Kick ROM in hex" + ) + parser.add_argument( + "-e", "--ext-addr", default="e00000", help="base address of Ext ROM in hex" + ) + parser.add_argument( + "-f", + "--add-footer", + default=False, + action="store_true", + help="add footer with check sum to Ext ROM", + ) + parser.add_argument( + "-r", "--rom-rev", default=None, help="set ROM revision, e.g. 
45.10" + ) + parser.add_argument( + "-k", + "--kickety_split", + default=False, + action="store_true", + help="add 'kickety split' romhdr at center of ROM", + ) + parser.add_argument( + "-b", "--fill-byte", default="ff", help="fill byte in hex for empty ranges" + ) parser.set_defaults(cmd=do_build_cmd) def setup_diff_parser(parser): - parser.add_argument('image_a', help='rom image a') - parser.add_argument('image_b', help='rom image b') - parser.add_argument('-s', '--same', default=False, action='store_true', - help="show same lines of ROMs") - parser.add_argument('-a', '--show-address', default=False, - action='store_true', - help="show KickROM address (otherwise image offset)") - parser.add_argument('-b', '--rom-addr', default=None, - help="use hex base address for output") - parser.add_argument('-f', '--force', default=False, action='store_true', - help="diff ROMs even if size differs") - parser.add_argument('-c', '--columns', default=8, type=int, - help="number of bytes shown per line") + parser.add_argument("image_a", help="rom image a") + parser.add_argument("image_b", help="rom image b") + parser.add_argument( + "-s", + "--same", + default=False, + action="store_true", + help="show same lines of ROMs", + ) + parser.add_argument( + "-a", + "--show-address", + default=False, + action="store_true", + help="show KickROM address (otherwise image offset)", + ) + parser.add_argument( + "-b", "--rom-addr", default=None, help="use hex base address for output" + ) + parser.add_argument( + "-f", + "--force", + default=False, + action="store_true", + help="diff ROMs even if size differs", + ) + parser.add_argument( + "-c", "--columns", default=8, type=int, help="number of bytes shown per line" + ) parser.set_defaults(cmd=do_diff_cmd) def setup_dump_parser(parser): - parser.add_argument('image', help='rom image to be dumped') - parser.add_argument('-a', '--show-address', default=False, - action='store_true', - help="show KickROM address (otherwise image offset)") - 
parser.add_argument('-b', '--rom-addr', default=None, - help="use hex base address for output") - parser.add_argument('-c', '--columns', default=16, type=int, - help="number of bytes shown per line") + parser.add_argument("image", help="rom image to be dumped") + parser.add_argument( + "-a", + "--show-address", + default=False, + action="store_true", + help="show KickROM address (otherwise image offset)", + ) + parser.add_argument( + "-b", "--rom-addr", default=None, help="use hex base address for output" + ) + parser.add_argument( + "-c", "--columns", default=16, type=int, help="number of bytes shown per line" + ) parser.set_defaults(cmd=do_dump_cmd) def setup_info_parser(parser): - parser.add_argument('image', help='rom image to be analyzed') + parser.add_argument("image", help="rom image to be analyzed") parser.set_defaults(cmd=do_info_cmd) def setup_patch_parser(parser): - parser.add_argument('image', - help='rom image to be patched') - parser.add_argument('patches', default=None, nargs='+', - help='patches to be applied: name[:arg1[=val1],...]') - parser.add_argument('-o', '--output', - help='rom image file to be built') + parser.add_argument("image", help="rom image to be patched") + parser.add_argument( + "patches", + default=None, + nargs="+", + help="patches to be applied: name[:arg1[=val1],...]", + ) + parser.add_argument("-o", "--output", help="rom image file to be built") parser.set_defaults(cmd=do_patch_cmd) @@ -537,28 +614,40 @@ def setup_patches_parser(parser): def setup_combine_parser(parser): - parser.add_argument('kick_rom', help='kick rom to be combined') - parser.add_argument('ext_rom', help='ext rom to be combined') + parser.add_argument("kick_rom", help="kick rom to be combined") + parser.add_argument("ext_rom", help="ext rom to be combined") parser.set_defaults(cmd=do_combine_cmd) - parser.add_argument('-o', '--output', - help='rom image file to be built') + parser.add_argument("-o", "--output", help="rom image file to be built") def 
setup_scan_parser(parser): - parser.add_argument('image', help='rom image to be scanned') - parser.add_argument('-b', '--rom-addr', default=None, - help="use this base address for ROM. otherwise guess.") - parser.add_argument('-i', '--show-info', default=False, - action='store_true', - help="show more details on resident") + parser.add_argument("image", help="rom image to be scanned") + parser.add_argument( + "-b", + "--rom-addr", + default=None, + help="use this base address for ROM. otherwise guess.", + ) + parser.add_argument( + "-i", + "--show-info", + default=False, + action="store_true", + help="show more details on resident", + ) parser.set_defaults(cmd=do_scan_cmd) def setup_copy_parser(parser): - parser.add_argument('in_image', help='rom image to read') - parser.add_argument('out_image', help='rom image to be written') - parser.add_argument('-c', '--fix-checksum', default=False, action='store_true', - help="fix checksum on written image") + parser.add_argument("in_image", help="rom image to read") + parser.add_argument("out_image", help="rom image to be written") + parser.add_argument( + "-c", + "--fix-checksum", + default=False, + action="store_true", + help="fix checksum on written image", + ) parser.set_defaults(cmd=do_copy_cmd) @@ -567,63 +656,63 @@ def parse_args(): parser = argparse.ArgumentParser(description=DESC) # global options - parser.add_argument('-k', '--rom-key', default='rom.key', - help='the path of a rom.key file if you want to ' - 'process crypted ROMs') + parser.add_argument( + "-k", + "--rom-key", + default="rom.key", + help="the path of a rom.key file if you want to " "process crypted ROMs", + ) add_logging_options(parser) # sub parsers sub_parsers = parser.add_subparsers(help="sub commands") # build - build_parser = sub_parsers.add_parser( - 'build', help='build a ROM from modules') + build_parser = sub_parsers.add_parser("build", help="build a ROM from modules") setup_build_parser(build_parser) # combine combine_parser = 
sub_parsers.add_parser( - 'combine', help='combine a kick and an ext ROM to a 1 MiB ROM') + "combine", help="combine a kick and an ext ROM to a 1 MiB ROM" + ) setup_combine_parser(combine_parser) # diff diff_parser = sub_parsers.add_parser( - 'diff', help='show differences in two ROM images') + "diff", help="show differences in two ROM images" + ) setup_diff_parser(diff_parser) # dump - dump_parser = sub_parsers.add_parser('dump', help='dump a ROM image') + dump_parser = sub_parsers.add_parser("dump", help="dump a ROM image") setup_dump_parser(dump_parser) # info - info_parser = sub_parsers.add_parser( - 'info', help='print infos on a ROM image') + info_parser = sub_parsers.add_parser("info", help="print infos on a ROM image") setup_info_parser(info_parser) # list - list_parser = sub_parsers.add_parser( - 'list', help='list ROMs in split data') + list_parser = sub_parsers.add_parser("list", help="list ROMs in split data") setup_list_parser(list_parser) # patch - patch_parser = sub_parsers.add_parser('patch', help='patch a ROM image') + patch_parser = sub_parsers.add_parser("patch", help="patch a ROM image") setup_patch_parser(patch_parser) # patches - patches_parser = sub_parsers.add_parser( - 'patches', help='show available patches') + patches_parser = sub_parsers.add_parser("patches", help="show available patches") setup_patches_parser(patches_parser) # query - query_parser = sub_parsers.add_parser( - 'query', help='query if ROM is in split data') + query_parser = sub_parsers.add_parser("query", help="query if ROM is in split data") setup_query_parser(query_parser) # scan - scan_parser = sub_parsers.add_parser('scan', help='scan ROM for residents') + scan_parser = sub_parsers.add_parser("scan", help="scan ROM for residents") setup_scan_parser(scan_parser) # split - split_parser = sub_parsers.add_parser( - 'split', help='split a ROM into modules') + split_parser = sub_parsers.add_parser("split", help="split a ROM into modules") setup_split_parser(split_parser) # 
copy copy_parser = sub_parsers.add_parser( - 'copy', help='copy ROM image and fix on the fly') + "copy", help="copy ROM image and fix on the fly" + ) setup_copy_parser(copy_parser) # parse args = parser.parse_args() - if 'cmd' not in args: + if "cmd" not in args: parser.print_help() sys.exit(1) return args @@ -645,5 +734,5 @@ def main(): # ----- entry point ----- -if __name__ == '__main__': +if __name__ == "__main__": sys.exit(main()) diff --git a/amitools/tools/typetool.py b/amitools/tools/typetool.py index 30eaf977..f7361364 100755 --- a/amitools/tools/typetool.py +++ b/amitools/tools/typetool.py @@ -12,15 +12,15 @@ def main(): - cfg_files = ( - # first look in current dir - os.path.join(os.getcwd(), ".vamosrc"), - # then in home dir - os.path.expanduser("~/.vamosrc"), - ) - tools = [TypeTool()] - sys.exit(tools_main(tools, cfg_files)) + cfg_files = ( + # first look in current dir + os.path.join(os.getcwd(), ".vamosrc"), + # then in home dir + os.path.expanduser("~/.vamosrc"), + ) + tools = [TypeTool()] + sys.exit(tools_main(tools, cfg_files)) -if __name__ == '__main__': - sys.exit(main()) +if __name__ == "__main__": + sys.exit(main()) diff --git a/amitools/tools/vamos.py b/amitools/tools/vamos.py index cde4d439..8214474f 100755 --- a/amitools/tools/vamos.py +++ b/amitools/tools/vamos.py @@ -13,27 +13,25 @@ def main(): - cfg_files = ( - # first look in current dir - os.path.join(os.getcwd(), ".vamosrc"), - # then in home dir - os.path.expanduser("~/.vamosrc"), - ) - # profile run? - if 'VAMOS_PROFILE' in os.environ: - vamos_profile = os.environ['VAMOS_PROFILE'] - if vamos_profile == 'dump': - profile_file = None + cfg_files = ( + # first look in current dir + os.path.join(os.getcwd(), ".vamosrc"), + # then in home dir + os.path.expanduser("~/.vamosrc"), + ) + # profile run? 
+ if "VAMOS_PROFILE" in os.environ: + vamos_profile = os.environ["VAMOS_PROFILE"] + if vamos_profile == "dump": + profile_file = None + else: + profile_file = vamos_profile + ret_code = main_profile(cfg_files, profile_file=profile_file, dump_profile=True) + # regular run else: - profile_file = vamos_profile - ret_code = main_profile(cfg_files, - profile_file=profile_file, - dump_profile=True) - # regular run - else: - ret_code = vmain(cfg_files) - sys.exit(ret_code) + ret_code = vmain(cfg_files) + sys.exit(ret_code) -if __name__ == '__main__': - main() +if __name__ == "__main__": + main() diff --git a/amitools/tools/vamospath.py b/amitools/tools/vamospath.py index 985beab2..7229f6e8 100755 --- a/amitools/tools/vamospath.py +++ b/amitools/tools/vamospath.py @@ -12,15 +12,15 @@ def main(): - cfg_files = ( - # first look in current dir - os.path.join(os.getcwd(), ".vamosrc"), - # then in home dir - os.path.expanduser("~/.vamosrc"), - ) - tools = [PathTool()] - sys.exit(tools_main(tools, cfg_files)) + cfg_files = ( + # first look in current dir + os.path.join(os.getcwd(), ".vamosrc"), + # then in home dir + os.path.expanduser("~/.vamosrc"), + ) + tools = [PathTool()] + sys.exit(tools_main(tools, cfg_files)) -if __name__ == '__main__': - sys.exit(main()) +if __name__ == "__main__": + sys.exit(main()) diff --git a/amitools/tools/vamostool.py b/amitools/tools/vamostool.py index 8de950d5..dd345002 100644 --- a/amitools/tools/vamostool.py +++ b/amitools/tools/vamostool.py @@ -11,15 +11,15 @@ def main(): - cfg_files = ( - # first look in current dir - os.path.join(os.getcwd(), ".vamosrc"), - # then in home dir - os.path.expanduser("~/.vamosrc"), - ) - tools = [PathTool(), TypeTool(), LibProfilerTool()] - sys.exit(tools_main(tools, cfg_files)) + cfg_files = ( + # first look in current dir + os.path.join(os.getcwd(), ".vamosrc"), + # then in home dir + os.path.expanduser("~/.vamosrc"), + ) + tools = [PathTool(), TypeTool(), LibProfilerTool()] + sys.exit(tools_main(tools, 
cfg_files)) -if __name__ == '__main__': - sys.exit(main()) +if __name__ == "__main__": + sys.exit(main()) diff --git a/amitools/tools/xdfscan.py b/amitools/tools/xdfscan.py index a0ce8d43..02026537 100755 --- a/amitools/tools/xdfscan.py +++ b/amitools/tools/xdfscan.py @@ -3,8 +3,6 @@ # quickly scan large sets of Amiga disk image files - - import sys import argparse import os.path @@ -16,161 +14,203 @@ # ----- logging ----- + class MyProgress(Progress): - def __init__(self): - Progress.__init__(self) - self.clk = int(time.perf_counter() * 1000) - def begin(self, msg): - Progress.begin(self, msg) - def add(self): - Progress.add(self) - clk = int(time.perf_counter() * 1000) - delta = clk - self.clk - # update display every 250ms - if delta > 250: - self.clk = clk - print("%s: %d\r" % (self.msg, self.num)), - sys.stdout.flush() + def __init__(self): + Progress.__init__(self) + self.clk = int(time.perf_counter() * 1000) + + def begin(self, msg): + Progress.begin(self, msg) + + def add(self): + Progress.add(self) + clk = int(time.perf_counter() * 1000) + delta = clk - self.clk + # update display every 250ms + if delta > 250: + self.clk = clk + print("%s: %d\r" % (self.msg, self.num)), + sys.stdout.flush() + def pre_log_path(path, msg): - print("%20s %s \r" % (msg, path)), - sys.stdout.flush() + print("%20s %s \r" % (msg, path)), + sys.stdout.flush() + def log_path(path, msg): - print("%20s %s " % (msg, path)) + print("%20s %s " % (msg, path)) + def print_block(percent): - print("%3.1f%%\r" % (percent / 10.0)), - sys.stdout.flush() + print("%3.1f%%\r" % (percent / 10.0)), + sys.stdout.flush() + # ----- scanner ----- factory = BlkDevFactory() + def scan(path, args): - if not os.path.exists(path): - log_path(path, "DOES NOT EXIST") - return 1 - if os.path.isdir(path): - return scan_dir(path, args) - elif os.path.isfile(path): - return scan_file(path, args) + if not os.path.exists(path): + log_path(path, "DOES NOT EXIST") + return 1 + if os.path.isdir(path): + return 
scan_dir(path, args) + elif os.path.isfile(path): + return scan_file(path, args) + def scan_dir(path, args): - for name in sorted(os.listdir(path)): - epath = os.path.join(path, name) - result = scan(epath, args) - if result != 0: - return result - return 0 + for name in sorted(os.listdir(path)): + epath = os.path.join(path, name) + result = scan(epath, args) + if result != 0: + return result + return 0 + def check_extension(path, args): - ext = [] - if not args.skip_disks: - ext += ['.adf','.adz','.adf.gz'] - if not args.skip_hds: - ext += ['.hdf'] - for a in ext: - if path.endswith(a): - return True - return False + ext = [] + if not args.skip_disks: + ext += [".adf", ".adz", ".adf.gz"] + if not args.skip_hds: + ext += [".hdf"] + for a in ext: + if path.endswith(a): + return True + return False + def scan_file(path, args): - if not check_extension(path, args): - return 0 - try: - pre_log_path(path,"scan") - ret_code = 0 - ret_str = "" - stay = True - - # create a block device for image file - blkdev = factory.open(path, read_only=True) - - # create validator - progress = MyProgress() - v = Validator(blkdev, min_level=args.level, debug=args.debug, progress=progress) - - # 1. check boot block - res = [] - boot_dos, bootable = v.scan_boot() - if boot_dos: - # 2. check root block - root = v.scan_root() - if not root: - # disk is bootable - if bootable: - res.append("boot") + if not check_extension(path, args): + return 0 + try: + pre_log_path(path, "scan") + ret_code = 0 + ret_str = "" + stay = True + + # create a block device for image file + blkdev = factory.open(path, read_only=True) + + # create validator + progress = MyProgress() + v = Validator(blkdev, min_level=args.level, debug=args.debug, progress=progress) + + # 1. check boot block + res = [] + boot_dos, bootable = v.scan_boot() + if boot_dos: + # 2. 
check root block + root = v.scan_root() + if not root: + # disk is bootable + if bootable: + res.append("boot") + else: + res.append(" ") + # invalid root + res.append("nofs") + else: + # 3. scan tree + v.scan_dir_tree() + # 4. scan files + v.scan_files() + # 5. scan_bitmap + v.scan_bitmap() + + # summary + e, w = v.get_summary() + if w > 0: + res.append("w%03d" % w) + if e > 0: + res.append("E%03d" % e) + else: + res.append(" ") + # disk is bootable + if bootable: + res.append("boot") + else: + res.append(" ") + if e == 0 and w == 0: + res.append(" ok ") + else: + res.append("NOK ") else: - res.append(" ") - # invalid root - res.append("nofs") - else: - # 3. scan tree - v.scan_dir_tree() - # 4. scan files - v.scan_files() - # 5. scan_bitmap - v.scan_bitmap() + # boot block is not dos + res.append("NDOS") + # report result + if len(res) == 0: + res.append("done") + log_path(path, " ".join(res)) # summary - e, w = v.get_summary() - if w > 0: - res.append("w%03d" % w) - if e > 0: - res.append("E%03d" % e) - else: - res.append(" ") - # disk is bootable - if bootable: - res.append("boot") - else: - res.append(" ") - if e == 0 and w == 0: - res.append(" ok ") - else: - res.append("NOK ") - else: - # boot block is not dos - res.append("NDOS") - - # report result - if len(res) == 0: - res.append("done") - log_path(path," ".join(res)) - # summary - if args.verbose: - v.log.dump() - return ret_code - except IOError as e: - log_path(path,"BLKDEV?") - if args.verbose: - print(e) - return 0 + if args.verbose: + v.log.dump() + return ret_code + except IOError as e: + log_path(path, "BLKDEV?") + if args.verbose: + print(e) + return 0 + # ----- main ----- def main(): - parser = argparse.ArgumentParser() - parser.add_argument('input', nargs='+', help="input image file or directory (to scan tree)") - parser.add_argument('-v', '--verbose', action='store_true', default=False, help="be more verbos") - parser.add_argument('-d', '--debug', action='store_true', default=False, help="show 
debug info") - parser.add_argument('-q', '--quick', action='store_true', default=False, help="quick mode. faster: skip image if root is invalid") - parser.add_argument('-l', '--level', default=2, help="show only level or above (0=debug, 1=info, 2=warn, 3=error)", type=int) - parser.add_argument('-D', '--skip-disks', action='store_true', default=False, help="do not scan disk images") - parser.add_argument('-H', '--skip-hds', action='store_true', default=False, help="do not scan hard disk images") - args = parser.parse_args() - - # main scan loop - ret = 0 - for i in args.input: - ret = scan(i, args) - if ret != 0: - break - return ret - - -if __name__ == '__main__': - try: - sys.exit(main()) - except KeyboardInterrupt as e: - print("aborting...") + parser = argparse.ArgumentParser() + parser.add_argument( + "input", nargs="+", help="input image file or directory (to scan tree)" + ) + parser.add_argument( + "-v", "--verbose", action="store_true", default=False, help="be more verbos" + ) + parser.add_argument( + "-d", "--debug", action="store_true", default=False, help="show debug info" + ) + parser.add_argument( + "-q", + "--quick", + action="store_true", + default=False, + help="quick mode. 
faster: skip image if root is invalid", + ) + parser.add_argument( + "-l", + "--level", + default=2, + help="show only level or above (0=debug, 1=info, 2=warn, 3=error)", + type=int, + ) + parser.add_argument( + "-D", + "--skip-disks", + action="store_true", + default=False, + help="do not scan disk images", + ) + parser.add_argument( + "-H", + "--skip-hds", + action="store_true", + default=False, + help="do not scan hard disk images", + ) + args = parser.parse_args() + + # main scan loop + ret = 0 + for i in args.input: + ret = scan(i, args) + if ret != 0: + break + return ret + + +if __name__ == "__main__": + try: + sys.exit(main()) + except KeyboardInterrupt as e: + print("aborting...") diff --git a/amitools/tools/xdftool.py b/amitools/tools/xdftool.py index f0ce3701..a5b924fb 100755 --- a/amitools/tools/xdftool.py +++ b/amitools/tools/xdftool.py @@ -3,8 +3,6 @@ # swiss army knife for adf and hdf amiga disk images - - import sys import argparse import os.path @@ -25,821 +23,891 @@ # system encoding def make_fsstr(s): - if sys.version_info[0] == 3: - if isinstance(s, str): - return FSString(s) - # fetch default encoding (if available) - encoding = sys.stdin.encoding - if encoding is None: - encoding = "utf-8" - try: - if os.platform == "win32": - # set win default encoding - encoding = "cp1252" - except AttributeError: - pass - u = s.decode(encoding) - return FSString(u) + if sys.version_info[0] == 3: + if isinstance(s, str): + return FSString(s) + # fetch default encoding (if available) + encoding = sys.stdin.encoding + if encoding is None: + encoding = "utf-8" + try: + if os.platform == "win32": + # set win default encoding + encoding = "cp1252" + except AttributeError: + pass + u = s.decode(encoding) + return FSString(u) + # ----- commands ----- class Command: - def __init__(self, args, opts, edit=False): - self.args = args - self.opts = opts - self.edit = edit - self.exit_code = 0 - - self.volume = None - self.blkdev = None - - def run(self, blkdev, vol): - # 
optional init blkdev function - if hasattr(self, "init_blkdev"): - if blkdev == None: - self.blkdev = self.init_blkdev(self.args.image_file) - if self.blkdev == None: - return 5 - blkdev = self.blkdev - - # optional init volume function - if hasattr(self, "init_vol"): - # close old - if vol != None: - vol.close() - # create new volume - self.volume = self.init_vol(blkdev) - if self.volume == None: - return 6 - vol = self.volume - - # common handler - if hasattr(self, 'handle_blkdev'): - return self.handle_blkdev(blkdev) - elif hasattr(self, 'handle_vol'): - return self.handle_vol(vol) - else: - return 0 - - def has_init_blkdev(self): - return hasattr(self, 'init_blkdev') - - def need_volume(self): - return hasattr(self, 'handle_vol') and not hasattr(self, 'init_vol') + def __init__(self, args, opts, edit=False): + self.args = args + self.opts = opts + self.edit = edit + self.exit_code = 0 + + self.volume = None + self.blkdev = None + + def run(self, blkdev, vol): + # optional init blkdev function + if hasattr(self, "init_blkdev"): + if blkdev == None: + self.blkdev = self.init_blkdev(self.args.image_file) + if self.blkdev == None: + return 5 + blkdev = self.blkdev + + # optional init volume function + if hasattr(self, "init_vol"): + # close old + if vol != None: + vol.close() + # create new volume + self.volume = self.init_vol(blkdev) + if self.volume == None: + return 6 + vol = self.volume + + # common handler + if hasattr(self, "handle_blkdev"): + return self.handle_blkdev(blkdev) + elif hasattr(self, "handle_vol"): + return self.handle_vol(vol) + else: + return 0 + + def has_init_blkdev(self): + return hasattr(self, "init_blkdev") + + def need_volume(self): + return hasattr(self, "handle_vol") and not hasattr(self, "init_vol") + # ----- command handler ----- class FSCommandQueue(CommandQueue): - def __init__(self, args, cmd_list, sep, cmd_map): - CommandQueue.__init__(self, cmd_list, sep, cmd_map) - self.args = args - self.blkdev = None - self.volume = None - - 
def run(self): - self.img = self.args.image_file - try: - # main command loop - exit_code = CommandQueue.run(self) - except FSError as e: - cmd = "'%s'" % " ".join(self.cmd_line) - print(cmd, "FSError:", e) - exit_code = 3 - except IOError as e: - cmd = "'%s'" % " ".join(self.cmd_line) - print(cmd, "IOError:", e) - exit_code = 4 - finally: - # close volume - if self.volume != None: - self.volume.close() + def __init__(self, args, cmd_list, sep, cmd_map): + CommandQueue.__init__(self, cmd_list, sep, cmd_map) + self.args = args + self.blkdev = None + self.volume = None + + def run(self): + self.img = self.args.image_file + try: + # main command loop + exit_code = CommandQueue.run(self) + except FSError as e: + cmd = "'%s'" % " ".join(self.cmd_line) + print(cmd, "FSError:", e) + exit_code = 3 + except IOError as e: + cmd = "'%s'" % " ".join(self.cmd_line) + print(cmd, "IOError:", e) + exit_code = 4 + finally: + # close volume + if self.volume != None: + self.volume.close() + if self.args.verbose: + print("closing volume:", self.img) + # close blkdev + if self.blkdev != None: + self.blkdev.close() + if self.args.verbose: + print("closing image:", self.img) + return exit_code + + def create_cmd(self, cclass, name, opts): + return cclass(self.args, opts) + + def _open_volume(self): + # setup volume + if self.volume == None: + self.volume = ADFSVolume(self.blkdev) + if self.args.verbose: + print("opening volume:", self.img) + self.volume.open() + + def run_first(self, cmd_line, cmd): + self.cmd_line = cmd_line + + # check if first command is an init command + if not cmd.has_init_blkdev(): + # auto add 'open' command + pre_cmd = OpenCmd(self.args, []) + if self.args.verbose: + print("auto open command:", self.cmd_line) + exit_code = pre_cmd.run(self.blkdev, self.volume) + if self.args.verbose: + print("auto open exit_code:", exit_code) + if exit_code != 0: + return exit_code + self.blkdev = pre_cmd.blkdev + # setup volume (if necessary) + if cmd.need_volume(): + 
self._open_volume() + + # run first command + if self.args.verbose: + print("command:", self.cmd_line) + if cmd.edit and self.args.read_only: + raise IOError("Edit commands not allowed in read-only mode") + + # check code of command after __init__ parsing + if cmd.exit_code != 0: + return cmd.exit_code + + # perform command + exit_code = cmd.run(self.blkdev, self.volume) + if cmd.blkdev != None: + self.blkdev = cmd.blkdev + if cmd.volume != None: + self.volume = cmd.volume + + # final exit code + if self.args.verbose: + print("exit_code:", exit_code) + return exit_code + + def run_next(self, cmd_line, cmd): + self.cmd_line = cmd_line if self.args.verbose: - print("closing volume:", self.img) - # close blkdev - if self.blkdev != None: - self.blkdev.close() + print("command:", self.cmd_line) + # verify command + if cmd.edit and self.args.read_only: + raise IOError("Edit commands not allowed in read-only mode") + # make sure volume is set up + if self.volume == None and cmd.need_volume(): + self._open_volume() + # run command + exit_code = cmd.run(self.blkdev, self.volume) + if cmd.blkdev != None: + self.blkdev = cmd.blkdev + if cmd.volume != None: + self.volume = cmd.volume if self.args.verbose: - print("closing image:",self.img) - return exit_code - - def create_cmd(self, cclass, name, opts): - return cclass(self.args, opts) - - def _open_volume(self): - # setup volume - if self.volume == None: - self.volume = ADFSVolume(self.blkdev) - if self.args.verbose: - print("opening volume:", self.img) - self.volume.open() - - def run_first(self, cmd_line, cmd): - self.cmd_line = cmd_line - - # check if first command is an init command - if not cmd.has_init_blkdev(): - # auto add 'open' command - pre_cmd = OpenCmd(self.args, []) - if self.args.verbose: - print("auto open command:", self.cmd_line) - exit_code = pre_cmd.run(self.blkdev, self.volume) - if self.args.verbose: - print("auto open exit_code:", exit_code) - if exit_code != 0: + print("exit_code:", exit_code) return 
exit_code - self.blkdev = pre_cmd.blkdev - # setup volume (if necessary) - if cmd.need_volume(): - self._open_volume() - - # run first command - if self.args.verbose: - print("command:", self.cmd_line) - if cmd.edit and self.args.read_only: - raise IOError("Edit commands not allowed in read-only mode") - - # check code of command after __init__ parsing - if cmd.exit_code != 0: - return cmd.exit_code - - # perform command - exit_code = cmd.run(self.blkdev, self.volume) - if cmd.blkdev != None: - self.blkdev = cmd.blkdev - if cmd.volume != None: - self.volume = cmd.volume - - # final exit code - if self.args.verbose: - print("exit_code:", exit_code) - return exit_code - - def run_next(self, cmd_line, cmd): - self.cmd_line = cmd_line - if self.args.verbose: - print("command:", self.cmd_line) - # verify command - if cmd.edit and self.args.read_only: - raise IOError("Edit commands not allowed in read-only mode") - # make sure volume is set up - if self.volume == None and cmd.need_volume(): - self._open_volume() - # run command - exit_code = cmd.run(self.blkdev, self.volume) - if cmd.blkdev != None: - self.blkdev = cmd.blkdev - if cmd.volume != None: - self.volume = cmd.volume - if self.args.verbose: - print("exit_code:", exit_code) - return exit_code + # ----- Init: Open/Create ----- + class OpenCmd(Command): - def init_blkdev(self, image_file): - opts = KeyValue.parse_key_value_strings(self.opts) - f = BlkDevFactory() - return f.open(image_file, options=opts, read_only=self.args.read_only) + def init_blkdev(self, image_file): + opts = KeyValue.parse_key_value_strings(self.opts) + f = BlkDevFactory() + return f.open(image_file, options=opts, read_only=self.args.read_only) + class CreateCmd(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, edit=True) + def __init__(self, args, opts): + Command.__init__(self, args, opts, edit=True) + + def init_blkdev(self, image_file): + opts = KeyValue.parse_key_value_strings(self.opts) + f = 
BlkDevFactory() + return f.create(image_file, options=opts, force=self.args.force) - def init_blkdev(self, image_file): - opts = KeyValue.parse_key_value_strings(self.opts) - f = BlkDevFactory() - return f.create(image_file, options=opts, force=self.args.force) class FormatCmd(Command): - def init_blkdev(self, image_file): - opts = KeyValue.parse_key_value_strings(self.opts[1:]) - f = BlkDevFactory() - blkdev = f.open(image_file, options=opts, read_only=False, - none_if_missing=True) - if not blkdev: - return f.create(image_file, options=opts, force=self.args.force) - else: - return blkdev - - def init_vol(self, blkdev): - vol = ADFSVolume(blkdev) - n = len(self.opts) - if n < 1 or n > 2: - print("Usage: format [dos_type]") - return None - else: - if n > 1: - dos_str = self.opts[1] - dos_type = DosType.parse_dos_type_str(dos_str) - if dos_type is None: - print("ERROR invalid dos_tpye:", dos_str) - return None - else: - dos_type = None - vol_name = make_fsstr(self.opts[0]) - vol.create(vol_name, dos_type=dos_type) - return vol + def init_blkdev(self, image_file): + opts = KeyValue.parse_key_value_strings(self.opts[1:]) + f = BlkDevFactory() + blkdev = f.open(image_file, options=opts, read_only=False, none_if_missing=True) + if not blkdev: + return f.create(image_file, options=opts, force=self.args.force) + else: + return blkdev + + def init_vol(self, blkdev): + vol = ADFSVolume(blkdev) + n = len(self.opts) + if n < 1 or n > 2: + print("Usage: format [dos_type]") + return None + else: + if n > 1: + dos_str = self.opts[1] + dos_type = DosType.parse_dos_type_str(dos_str) + if dos_type is None: + print("ERROR invalid dos_tpye:", dos_str) + return None + else: + dos_type = None + vol_name = make_fsstr(self.opts[0]) + vol.create(vol_name, dos_type=dos_type) + return vol + # ----- Pack/Unpack ----- + class PackCmd(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, edit=True) - self.imager = Imager() - n = len(self.opts) - if n == 0: - 
print("Usage: pack [dos_type] [out_size]") - self.exit_code = 1 - else: - self.in_path = self.opts[0] - blkdev_opts = None - dos_type = None - if n > 1: - # is a dostype given? - dos_str = opts[1] - dos_type = DosType.parse_dos_type_str(dos_str) - if dos_type is not None: - begin = 2 + def __init__(self, args, opts): + Command.__init__(self, args, opts, edit=True) + self.imager = Imager() + n = len(self.opts) + if n == 0: + print("Usage: pack [dos_type] [out_size]") + self.exit_code = 1 else: - begin = 1 - # take remainder as blkdev opts - blkdev_opts = KeyValue.parse_key_value_strings(opts[begin:]) - self.blkdev_opts = blkdev_opts - self.dos_type = dos_type - self.imager.pack_begin(self.in_path) - - def init_blkdev(self, image_file): - return self.imager.pack_create_blkdev(self.in_path, image_file, force=self.args.force, options=self.blkdev_opts) - - def init_vol(self, blkdev): - return self.imager.pack_create_volume(self.in_path, blkdev, dos_type=self.dos_type) - - def handle_vol(self, volume): - self.imager.pack_root(self.in_path, volume) - self.imager.pack_end(self.in_path, volume) - if self.args.verbose: - print("Packed %d bytes" % (self.imager.get_total_bytes())) - return 0 + self.in_path = self.opts[0] + blkdev_opts = None + dos_type = None + if n > 1: + # is a dostype given? 
+ dos_str = opts[1] + dos_type = DosType.parse_dos_type_str(dos_str) + if dos_type is not None: + begin = 2 + else: + begin = 1 + # take remainder as blkdev opts + blkdev_opts = KeyValue.parse_key_value_strings(opts[begin:]) + self.blkdev_opts = blkdev_opts + self.dos_type = dos_type + self.imager.pack_begin(self.in_path) + + def init_blkdev(self, image_file): + return self.imager.pack_create_blkdev( + self.in_path, image_file, force=self.args.force, options=self.blkdev_opts + ) + + def init_vol(self, blkdev): + return self.imager.pack_create_volume( + self.in_path, blkdev, dos_type=self.dos_type + ) + + def handle_vol(self, volume): + self.imager.pack_root(self.in_path, volume) + self.imager.pack_end(self.in_path, volume) + if self.args.verbose: + print("Packed %d bytes" % (self.imager.get_total_bytes())) + return 0 + class UnpackCmd(Command): - def handle_vol(self, vol): - n = len(self.opts) - if n == 0: - print("Usage: unpack [fsuae]") - return 1 - else: - meta_mode = Imager.META_MODE_DB - if 'fsuae' in self.opts: - meta_mode = Imager.META_MODE_FSUAE - out_path = self.opts[0] - img = Imager(meta_mode=meta_mode) - img.unpack(vol, out_path) - if self.args.verbose: - print("Unpacked %d bytes" % (img.get_total_bytes())) - return 0 + def handle_vol(self, vol): + n = len(self.opts) + if n == 0: + print("Usage: unpack [fsuae]") + return 1 + else: + meta_mode = Imager.META_MODE_DB + if "fsuae" in self.opts: + meta_mode = Imager.META_MODE_FSUAE + out_path = self.opts[0] + img = Imager(meta_mode=meta_mode) + img.unpack(vol, out_path) + if self.args.verbose: + print("Unpacked %d bytes" % (img.get_total_bytes())) + return 0 + class RepackCmd(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, edit=True) - n = len(self.opts) - if n == 0: - print("Usage: repack [in_size]") - self.exit_code = 1 - in_img = self.opts[0] - in_opts = KeyValue.parse_key_value_strings(self.opts[1:]) - self.repacker = Repacker(in_img, in_opts) - if not 
self.repacker.create_in(): - self.exit_code = 2 - - def init_blkdev(self, image_file): - return self.repacker.create_out_blkdev(image_file) - - def init_vol(self, blkdev): - return self.repacker.create_out_volume(blkdev) - - def handle_vol(self, vol): - self.repacker.repack() - return 0 + def __init__(self, args, opts): + Command.__init__(self, args, opts, edit=True) + n = len(self.opts) + if n == 0: + print("Usage: repack [in_size]") + self.exit_code = 1 + in_img = self.opts[0] + in_opts = KeyValue.parse_key_value_strings(self.opts[1:]) + self.repacker = Repacker(in_img, in_opts) + if not self.repacker.create_in(): + self.exit_code = 2 + + def init_blkdev(self, image_file): + return self.repacker.create_out_blkdev(image_file) + + def init_vol(self, blkdev): + return self.repacker.create_out_volume(blkdev) + + def handle_vol(self, vol): + self.repacker.repack() + return 0 + # ----- Query Image ----- # list: list directory tree class ListCmd(Command): - def handle_vol(self, vol): - n = len(self.opts) - if n == 0: - vol.root_dir.list(all=True) - show_info = True - show_all = True - node = vol.get_root_dir() - else: - name = make_fsstr(self.opts[0]) - node = vol.get_path_name(name) - if node != None: - show_all = "all" in self.opts - show_detail = "detail" in self.opts - show_info = "info" in self.opts - node.list(all=show_all, detail=show_detail) - else: - print("ERROR path not found:", node) - return 2 - if show_info: - info = node.get_info(show_all) - for line in info: - print(line) - return 0 + def handle_vol(self, vol): + n = len(self.opts) + if n == 0: + vol.root_dir.list(all=True) + show_info = True + show_all = True + node = vol.get_root_dir() + else: + name = make_fsstr(self.opts[0]) + node = vol.get_path_name(name) + if node != None: + show_all = "all" in self.opts + show_detail = "detail" in self.opts + show_info = "info" in self.opts + node.list(all=show_all, detail=show_detail) + else: + print("ERROR path not found:", node) + return 2 + if show_info: + 
info = node.get_info(show_all) + for line in info: + print(line) + return 0 + class TypeCmd(Command): - def handle_vol(self, vol): - p = self.opts - if len(p) == 0: - print("Usage: type ") - return 1 - else: - name = make_fsstr(p[0]) - data = vol.read_file(name) - sys.stdout.buffer.write(data) - return 0 + def handle_vol(self, vol): + p = self.opts + if len(p) == 0: + print("Usage: type ") + return 1 + else: + name = make_fsstr(p[0]) + data = vol.read_file(name) + sys.stdout.buffer.write(data) + return 0 + class ReadCmd(Command): - def handle_vol(self, vol): - p = self.opts - n = len(p) - if n == 0 or n > 2: - print("Usage: read [sys_file]") - return 1 - # determine output name - out_name = os.path.basename(p[0]) - if n == 2: - if os.path.isdir(p[1]): - out_name = os.path.join(p[1],out_name) - else: - out_name = p[1] - # single file operation - name = make_fsstr(p[0]) - node = vol.get_path_name(name) - if node == None: - print("Node not found:", p[0]) - return 2 - # its a file - if node.is_file(): - data = node.get_file_data() - # write data to file - fh = open(out_name,"wb") - fh.write(data) - fh.close() - # its a dir - elif node.is_dir(): - img = Imager(meta_mode=Imager.META_MODE_NONE) - img.unpack_dir(node, out_name) - node.flush() - return 0 + def handle_vol(self, vol): + p = self.opts + n = len(p) + if n == 0 or n > 2: + print("Usage: read [sys_file]") + return 1 + # determine output name + out_name = os.path.basename(p[0]) + if n == 2: + if os.path.isdir(p[1]): + out_name = os.path.join(p[1], out_name) + else: + out_name = p[1] + # single file operation + name = make_fsstr(p[0]) + node = vol.get_path_name(name) + if node == None: + print("Node not found:", p[0]) + return 2 + # its a file + if node.is_file(): + data = node.get_file_data() + # write data to file + fh = open(out_name, "wb") + fh.write(data) + fh.close() + # its a dir + elif node.is_dir(): + img = Imager(meta_mode=Imager.META_MODE_NONE) + img.unpack_dir(node, out_name) + node.flush() + return 0 + 
class InfoCmd(Command): - def handle_vol(self, vol): - info = vol.get_info() - for line in info: - print(line) - return 0 + def handle_vol(self, vol): + info = vol.get_info() + for line in info: + print(line) + return 0 + # ----- Edit Image ----- + class MakeDirCmd(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, edit=True) - def handle_vol(self, vol): - if len(self.opts) != 1: - print("Usage: mkdir ") - return 1 - else: - dir_path = make_fsstr(self.opts[0]) - vol.create_dir(dir_path) - return 0 + def __init__(self, args, opts): + Command.__init__(self, args, opts, edit=True) + + def handle_vol(self, vol): + if len(self.opts) != 1: + print("Usage: mkdir ") + return 1 + else: + dir_path = make_fsstr(self.opts[0]) + vol.create_dir(dir_path) + return 0 + class WriteCmd(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, edit=True) - def handle_vol(self, vol): - n = len(self.opts) - if n == 0 or n > 2: - print("Usage: write [ami_path]") - return 1 - # get file_name and ami_path - sys_file = self.opts[0] - file_name = os.path.basename(sys_file) - if n > 1: - ami_path = self.opts[1] - else: - ami_path = os.path.basename(sys_file) - # check sys path - if not os.path.exists(sys_file): - print("File not found:", sys_file) - return 2 - - ami_path = make_fsstr(ami_path) - file_name = make_fsstr(file_name) - # handle file - if os.path.isfile(sys_file): - fh = open(sys_file,"rb") - data = fh.read() - fh.close() - vol.write_file(data, ami_path, file_name) - # handle dir - elif os.path.isdir(sys_file): - parent_node, dir_name = vol.get_create_path_name(ami_path,file_name) - if parent_node == None: - print("Invalid path", ami_path) - return 2 - node = parent_node.create_dir(dir_name) - img = Imager(meta_mode=Imager.META_MODE_NONE) - img.pack_dir(sys_file, node) - - return 0 + def __init__(self, args, opts): + Command.__init__(self, args, opts, edit=True) + + def handle_vol(self, vol): + n = len(self.opts) + if n == 0 or 
n > 2: + print("Usage: write [ami_path]") + return 1 + # get file_name and ami_path + sys_file = self.opts[0] + file_name = os.path.basename(sys_file) + if n > 1: + ami_path = self.opts[1] + else: + ami_path = os.path.basename(sys_file) + # check sys path + if not os.path.exists(sys_file): + print("File not found:", sys_file) + return 2 + + ami_path = make_fsstr(ami_path) + file_name = make_fsstr(file_name) + # handle file + if os.path.isfile(sys_file): + fh = open(sys_file, "rb") + data = fh.read() + fh.close() + vol.write_file(data, ami_path, file_name) + # handle dir + elif os.path.isdir(sys_file): + parent_node, dir_name = vol.get_create_path_name(ami_path, file_name) + if parent_node == None: + print("Invalid path", ami_path) + return 2 + node = parent_node.create_dir(dir_name) + img = Imager(meta_mode=Imager.META_MODE_NONE) + img.pack_dir(sys_file, node) + + return 0 + class DeleteCmd(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, edit=True) - def handle_vol(self, vol): - n = len(self.opts) - if n == 0: - print("Usage: delete [wipe] [all]") - return 1 - do_wipe = 'wipe' in self.opts - do_all = 'all' in self.opts - path = make_fsstr(self.opts[0]) - node = vol.delete(path, wipe=do_wipe, all=do_all) - return 0 + def __init__(self, args, opts): + Command.__init__(self, args, opts, edit=True) + + def handle_vol(self, vol): + n = len(self.opts) + if n == 0: + print("Usage: delete [wipe] [all]") + return 1 + do_wipe = "wipe" in self.opts + do_all = "all" in self.opts + path = make_fsstr(self.opts[0]) + node = vol.delete(path, wipe=do_wipe, all=do_all) + return 0 + class ProtectCmd(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, edit=True) - def handle_vol(self, vol): - n = len(self.opts) - if n != 2: - print("Usage: protect ") - return 1 - name = make_fsstr(self.opts[0]) - pr_str = self.opts[1] - node = vol.get_path_name(name) - if node != None: - node.change_protect_by_string(pr_str) - return 0 - 
else: - print("Can't find node:", name) - return 2 + def __init__(self, args, opts): + Command.__init__(self, args, opts, edit=True) + + def handle_vol(self, vol): + n = len(self.opts) + if n != 2: + print("Usage: protect ") + return 1 + name = make_fsstr(self.opts[0]) + pr_str = self.opts[1] + node = vol.get_path_name(name) + if node != None: + node.change_protect_by_string(pr_str) + return 0 + else: + print("Can't find node:", name) + return 2 + class CommentCmd(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, edit=True) - def handle_vol(self, vol): - n = len(self.opts) - if n != 2: - print("Usage: comment ") - return 1 - name = make_fsstr(self.opts[0]) - comment = make_fsstr(self.opts[1]) - node = vol.get_path_name(name) - if node != None: - node.change_comment(comment) - return 0 - else: - print("Can't find node:", name) - return 2 + def __init__(self, args, opts): + Command.__init__(self, args, opts, edit=True) + + def handle_vol(self, vol): + n = len(self.opts) + if n != 2: + print("Usage: comment ") + return 1 + name = make_fsstr(self.opts[0]) + comment = make_fsstr(self.opts[1]) + node = vol.get_path_name(name) + if node != None: + node.change_comment(comment) + return 0 + else: + print("Can't find node:", name) + return 2 + class TimeCmd(Command): - def __init__(self, args, opts): - Command.__init__(self, args, opts, edit=True) - def handle_vol(self, vol): - n = len(self.opts) - if n != 2: - print("Usage: time