diff --git a/cfg_parser.go b/cfg_parser.go
index 8f27940..a521f7b 100644
--- a/cfg_parser.go
+++ b/cfg_parser.go
@@ -1039,10 +1039,9 @@ func createRollingFileWriter(node *xmlNode, formatFromParent *formatter, formats
 			rArchivePath = rollingArchiveDefaultExplodedName
 		} else {
-			rArchivePath, ok = rollingArchiveTypesDefaultNames[rArchiveType]
-			if !ok {
-				return nil, fmt.Errorf("cannot get default filename for archive type = %v",
-					rArchiveType)
+			rArchivePath, err = rollingArchiveTypeDefaultName(rArchiveType, false)
+			if err != nil {
+				return nil, err
 			}
 		}
 	}
diff --git a/cfg_parser_test.go b/cfg_parser_test.go
index ca4b194..0d4d969 100644
--- a/cfg_parser_test.go
+++ b/cfg_parser_test.go
@@ -275,6 +275,23 @@ func getParserTests() []parserTest {
 	testExpected.RootDispatcher = testHeadSplitter
 	parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})
 
+	testName = "Rolling file writer archive gzip"
+	testLogFileName = getTestFileName(testName, "")
+	testConfig = `
+
+
+
+
+	`
+	testExpected = new(configForParsing)
+	testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl)
+	testExpected.Exceptions = nil
+	testrollingFileWriter, _ = NewRollingFileWriterSize(testLogFileName, rollingArchiveGzip, "log.tar.gz", 100, 5, rollingNameModePostfix, false)
+	testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testrollingFileWriter})
+	testExpected.LogType = syncloggerTypeFromString
+	testExpected.RootDispatcher = testHeadSplitter
+	parserTests = append(parserTests, parserTest{testName, testConfig, testExpected, false, nil})
+
 	testName = "Rolling file writer archive zip"
 	testLogFileName = getTestFileName(testName, "")
 	testConfig = `
@@ -320,7 +337,7 @@ func getParserTests() []parserTest {
 	testExpected = new(configForParsing)
 	testExpected.Constraints, _ = NewMinMaxConstraints(TraceLvl, CriticalLvl)
 	testExpected.Exceptions = nil
-	testrollingFileWriter, _ = NewRollingFileWriterSize(testLogFileName, rollingArchiveZip, "old", 100, 5, rollingNameModePostfix , true)
+	testrollingFileWriter, _ = NewRollingFileWriterSize(testLogFileName, rollingArchiveZip, "old", 100, 5, rollingNameModePostfix, true)
 	testHeadSplitter, _ = NewSplitDispatcher(DefaultFormatter, []interface{}{testrollingFileWriter})
 	testExpected.LogType = syncloggerTypeFromString
 	testExpected.RootDispatcher = testHeadSplitter
diff --git a/internals_fsutils.go b/internals_fsutils.go
index a0e573d..e3695d5 100644
--- a/internals_fsutils.go
+++ b/internals_fsutils.go
@@ -1,8 +1,10 @@
 package seelog
 
 import (
+	"archive/tar"
 	"archive/zip"
 	"bytes"
+	"compress/gzip"
 	"fmt"
 	"io"
 	"io/ioutil"
@@ -408,3 +410,116 @@ func createZip(archiveName string, files map[string][]byte) error {
 
 	return nil
 }
+
+func createTar(files map[string][]byte) ([]byte, error) {
+
+	// Create a buffer to write our archive to.
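+	// The archive is assembled in memory and returned as a byte slice; it is not written to a file here.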
+	tarBuffer := new(bytes.Buffer)
+	tarWriter := tar.NewWriter(tarBuffer)
+
+	for fpath, fcont := range files {
+
+		header := &tar.Header{
+			Name:    fpath,
+			Size:    int64(len(fcont)),
+			Mode:    defaultFilePermissions,
+			ModTime: time.Now(),
+		}
+
+		err := tarWriter.WriteHeader(header)
+
+		if err != nil {
+			return nil, err
+		}
+
+		_, err = tarWriter.Write(fcont)
+		if err != nil {
+			return nil, err
+		}
+	}
+	tarWriter.Close()
+
+	return tarBuffer.Bytes(), nil
+}
+
+func unTar(data []byte) (map[string][]byte, error) {
+	tarReader := tar.NewReader(bytes.NewReader(data))
+	files := make(map[string][]byte)
+
+	for {
+		header, err := tarReader.Next()
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			return nil, err
+		}
+
+		info := header.FileInfo()
+		if info.IsDir() {
+			continue
+		}
+		buffer := new(bytes.Buffer)
+		_, err = io.Copy(buffer, tarReader)
+		files[header.Name] = buffer.Bytes()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	return files, nil
+
+}
+
+func createGzip(archiveName string, content []byte) error {
+
+	// Create a buffer to write our archive to.
+	// Make sure to check the error on Close.
+	gzipBuffer := new(bytes.Buffer)
+	gzipWriter := gzip.NewWriter(gzipBuffer)
+
+	_, err := gzipWriter.Write(content)
+	if err != nil {
+		return err
+	}
+	err = gzipWriter.Close()
+	if err != nil {
+		return err
+	}
+
+	return ioutil.WriteFile(archiveName, gzipBuffer.Bytes(), defaultFilePermissions)
+
+}
+
+func unGzip(filename string) ([]byte, error) {
+
+	file, err := os.Open(filename)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+
+	reader, err := gzip.NewReader(file)
+	if err != nil {
+		return nil, err
+	}
+
+	content := new(bytes.Buffer)
+	byteBuffer := make([]byte, 1000)
+	byteRead := 0
+	for {
+		byteRead, err = reader.Read(byteBuffer)
+		content.Write(byteBuffer[0:byteRead])
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			return nil, err
+		}
+	}
+	reader.Close()
+	return content.Bytes(), nil
+
+}
+
+// isTar reports whether the data starts with a tar stream by checking for the "ustar" magic bytes at offset 257.
+func isTar(data []byte) bool {
+	tarMagicNumbers := []byte{'\x75', '\x73', '\x74', '\x61', '\x72'}
+	return len(data) >= 262 && bytes.Equal(data[257:262], tarMagicNumbers)
+}
diff --git a/internals_fsutils_test.go b/internals_fsutils_test.go
new file mode 100644
index 0000000..e4cc5c5
--- /dev/null
+++ b/internals_fsutils_test.go
@@ -0,0 +1,59 @@
+package seelog
+
+import (
+	"reflect"
+	"testing"
+)
+
+func TestGzip(t *testing.T) {
+	defer cleanupWriterTest(t)
+
+	files := make(map[string][]byte)
+	files["file1"] = []byte("I am a log")
+	err := createGzip("./gzip.gz", files["file1"])
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	decompressedFile, err := unGzip("./gzip.gz")
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	equal := reflect.DeepEqual(files["file1"], decompressedFile)
+	if !equal {
+		t.Fatal("gzip(ungzip(file)) should be equal to file")
+	}
+}
+
+func TestTar(t *testing.T) {
+	defer cleanupWriterTest(t)
+	files := make(map[string][]byte)
+	files["file1"] = []byte("I am a log")
+	files["file2"] = []byte("I am another log")
+	tar, err := createTar(files)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	resultFiles, err := unTar(tar)
+	if err != nil {
+		t.Fatal(err)
+	}
+	equal := reflect.DeepEqual(files, resultFiles)
+	if !equal {
+		t.Fatal("untar(tar(files)) should be equal to files")
+	}
+}
+
+func TestIsTar(t *testing.T) {
+	defer cleanupWriterTest(t)
+	files := make(map[string][]byte)
+	files["file1"] = []byte("I am a log")
+	files["file2"] = []byte("I am another log")
+	tar, _ := createTar(files)
+
+	if !isTar(tar) {
+		t.Fatal("tar(files) should be recognized as a tar file")
+	}
+}
diff --git a/writers_filewriter_test.go b/writers_filewriter_test.go
index b070c8a..f723912 100644
--- a/writers_filewriter_test.go
+++ b/writers_filewriter_test.go
@@ -93,7 +93,7 @@ func NewFileWriterTester(
 }
 
 func isWriterTestFile(fn string) bool {
-	return strings.Contains(fn, ".testlog") || strings.Contains(fn, ".zip")
+	return strings.Contains(fn, ".testlog") || strings.Contains(fn, ".zip") || strings.Contains(fn, ".gz")
 }
 
 func cleanupWriterTest(t *testing.T) {
diff --git a/writers_rollingfilewriter.go b/writers_rollingfilewriter.go
index f2b2c75..b16886a 100644
--- a/writers_rollingfilewriter.go
+++ b/writers_rollingfilewriter.go
@@ -114,15 +114,75 @@ type rollingArchiveType uint8
 const (
 	rollingArchiveNone = iota
 	rollingArchiveZip
+	rollingArchiveGzip
 )
 
 var rollingArchiveTypesStringRepresentation = map[rollingArchiveType]string{
 	rollingArchiveNone: "none",
 	rollingArchiveZip:  "zip",
+	rollingArchiveGzip: "gzip",
+}
+
+type archive func(archiveName string, files map[string][]byte, exploded bool) error
+
+type unarchive func(archiveName string) (map[string][]byte, error)
+
+type compressionType struct {
+	extension             string
+	handleMultipleEntries bool
+	archive               archive
+	unarchive             unarchive
+}
+
+var compressionTypes = map[rollingArchiveType]compressionType{
+	rollingArchiveZip: {
+		extension:             ".zip",
+		handleMultipleEntries: true,
+		archive: func(archiveName string, files map[string][]byte, exploded bool) error {
+			return createZip(archiveName, files)
+		},
+		unarchive: unzip,
+	},
+	rollingArchiveGzip: {
+		extension:             ".gz",
+		handleMultipleEntries: false,
+		archive: func(archiveName string, files map[string][]byte, exploded bool) error {
+			if exploded {
+				if len(files) != 1 {
+					return fmt.Errorf("expected only 1 file but got %v file(s)", len(files))
+				}
+				for _, data := range files {
+					return createGzip(archiveName, data)
+				}
+			}
+			tar, err := createTar(files)
+			if err != nil {
+				return err
+			}
+			return createGzip(archiveName, tar)
+		},
+		unarchive: func(archiveName string) (map[string][]byte, error) {
+			content, err := unGzip(archiveName)
+			if err != nil {
+				return nil, err
+			}
+			if isTar(content) {
+				return unTar(content)
+			}
+			file := make(map[string][]byte)
+			file[archiveName] = content
+			return file, nil
+		},
+	},
 }
 
-var rollingArchiveTypesExtension = map[rollingArchiveType]string {
-	rollingArchiveZip: ".zip",
+func (compressionType *compressionType) rollingArchiveTypeName(name string, exploded bool) string {
+	if !compressionType.handleMultipleEntries && !exploded {
+		return name + ".tar" + compressionType.extension
+	} else {
+		return name + compressionType.extension
+	}
+
 }
 
 func rollingArchiveTypeFromString(rollingArchiveTypeStr string) (rollingArchiveType, bool) {
@@ -135,11 +195,15 @@ func rollingArchiveTypeFromString(rollingArchiveTypeStr string) (rollingArchiveT
 	return 0, false
 }
 
-// Default names for different archivation types
+// Default names for different archive types
 var rollingArchiveDefaultExplodedName = "old"
 
-var rollingArchiveTypesDefaultNames = map[rollingArchiveType]string{
-	rollingArchiveZip: "log.zip",
+func rollingArchiveTypeDefaultName(archiveType rollingArchiveType, exploded bool) (string, error) {
+	compressionType, ok := compressionTypes[archiveType]
+	if !ok {
+		return "", fmt.Errorf("cannot get default filename for archive type = %v", archiveType)
+	}
+	return compressionType.rollingArchiveTypeName("log", exploded), nil
 }
 
 // rollerVirtual is an interface that represents all virtual funcs that are
@@ -278,6 +342,56 @@ func (rw *rollingFileWriter) createFileAndFolderIfNeeded(first bool) error {
 	return nil
 }
 
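+// archiveExplodedLogs compresses a single rolled log file into its own archive file under rw.archivePath.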
+func (rw *rollingFileWriter) archiveExplodedLogs(logFilename string, compressionType compressionType) error {
+	rollPath := filepath.Join(rw.currentDirPath, logFilename)
+	bts, err := ioutil.ReadFile(rollPath)
+	if err != nil {
+		return err
+	}
+
+	entry := make(map[string][]byte)
+	entry[logFilename] = bts
+	archiveFile := path.Clean(rw.archivePath + "/" + compressionType.rollingArchiveTypeName(logFilename, true))
+
+	// archive entry
+	return compressionType.archive(archiveFile, entry, true)
+}
+
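+// archiveUnexplodedLogs merges the rolls that are about to be deleted into the single archive file at rw.archivePath, keeping any entries the archive already contains.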
+func (rw *rollingFileWriter) archiveUnexplodedLogs(compressionType compressionType, rollsToDelete int, history []string) error {
+	var files map[string][]byte
+	// If archive exists
+	_, err := os.Lstat(rw.archivePath)
+	if nil == err {
+		// Extract files and content from it
+		files, err = compressionType.unarchive(rw.archivePath)
+		if err != nil {
+			return err
+		}
+
+		// Remove the original file
+		err = tryRemoveFile(rw.archivePath)
+		if err != nil {
+			return err
+		}
+	} else {
+		files = make(map[string][]byte)
+	}
+
+	// Add files to the existing files map, filled above
+	for i := 0; i < rollsToDelete; i++ {
+		rollPath := filepath.Join(rw.currentDirPath, history[i])
+		bts, err := ioutil.ReadFile(rollPath)
+		if err != nil {
+			return err
+		}
+
+		files[rollPath] = bts
+	}
+
+	// Put the final file set to archive file.
+	return compressionType.archive(rw.archivePath, files, false)
+}
+
 func (rw *rollingFileWriter) deleteOldRolls(history []string) error {
 	if rw.maxRolls <= 0 {
 		return nil
@@ -288,69 +402,21 @@ func (rw *rollingFileWriter) deleteOldRolls(history []string) error {
 		return nil
 	}
 
-	switch rw.archiveType {
-	case rollingArchiveZip:
+	if rw.archiveType != rollingArchiveNone {
 		if rw.archiveExploded {
 			os.MkdirAll(rw.archivePath, defaultDirectoryPermissions)
 
 			// Archive logs
 			for i := 0; i < rollsToDelete; i++ {
-				rollPath := filepath.Join(rw.currentDirPath, history[i])
-				bts, err := ioutil.ReadFile(rollPath)
-				if err != nil {
-					return err
-				}
-
-				entry := make(map[string][]byte)
-				entry[history[i]] = bts
-				archiveFile := path.Clean(rw.archivePath + "/" + history[i] + rollingArchiveTypesExtension[rollingArchiveZip])
-
-				// zip entry
-				if err = createZip(archiveFile, entry); err != nil {
-					return err
-				}
-
+				rw.archiveExplodedLogs(history[i], compressionTypes[rw.archiveType])
 			}
-
 		} else {
-			var files map[string][]byte
 			os.MkdirAll(path.Dir(rw.archivePath), defaultDirectoryPermissions)
 
-			// If archive exists
-			_, err := os.Lstat(rw.archivePath)
-			if nil == err {
-				// Extract files and content from it
-				files, err = unzip(rw.archivePath)
-				if err != nil {
-					return err
-				}
-
-				// Remove the original file
-				err = tryRemoveFile(rw.archivePath)
-				if err != nil {
-					return err
-				}
-			} else {
-				files = make(map[string][]byte)
-			}
-
-			// Add files to the existing files map, filled above
-			for i := 0; i < rollsToDelete; i++ {
-				rollPath := filepath.Join(rw.currentDirPath, history[i])
-				bts, err := ioutil.ReadFile(rollPath)
-				if err != nil {
-					return err
-				}
-
-				files[rollPath] = bts
-			}
-
-			// Put the final file set to zip file.
-			if err = createZip(rw.archivePath, files); err != nil {
-				return err
-			}
+			rw.archiveUnexplodedLogs(compressionTypes[rw.archiveType], rollsToDelete, history)
 		}
 	}
 
+	var err error
 	// In all cases (archive files or not) the files should be deleted.
 	for i := 0; i < rollsToDelete; i++ {
@@ -508,13 +574,17 @@ func (rws *rollingFileWriterSize) isFileRollNameValid(rname string) bool {
 
 type rollSizeFileTailsSlice []string
 
-func (p rollSizeFileTailsSlice) Len() int { return len(p) }
+func (p rollSizeFileTailsSlice) Len() int {
+	return len(p)
+}
 func (p rollSizeFileTailsSlice) Less(i, j int) bool {
 	v1, _ := strconv.Atoi(p[i])
 	v2, _ := strconv.Atoi(p[j])
 	return v1 < v2
 }
-func (p rollSizeFileTailsSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
+func (p rollSizeFileTailsSlice) Swap(i, j int) {
+	p[i], p[j] = p[j], p[i]
+}
 
 func (rws *rollingFileWriterSize) sortFileRollNamesAsc(fs []string) ([]string, error) {
 	ss := rollSizeFileTailsSlice(fs)
@@ -608,7 +678,9 @@ type rollTimeFileTailsSlice struct {
 	pattern string
 }
 
-func (p rollTimeFileTailsSlice) Len() int { return len(p.data) }
+func (p rollTimeFileTailsSlice) Len() int {
+	return len(p.data)
+}
 
 func (p rollTimeFileTailsSlice) Less(i, j int) bool {
 	t1, _ := time.ParseInLocation(p.pattern, p.data[i], time.Local)
@@ -616,7 +688,9 @@ func (p rollTimeFileTailsSlice) Less(i, j int) bool {
 	return t1.Before(t2)
 }
 
-func (p rollTimeFileTailsSlice) Swap(i, j int) { p.data[i], p.data[j] = p.data[j], p.data[i] }
+func (p rollTimeFileTailsSlice) Swap(i, j int) {
+	p.data[i], p.data[j] = p.data[j], p.data[i]
+}
 
 func (rwt *rollingFileWriterTime) sortFileRollNamesAsc(fs []string) ([]string, error) {
 	ss := rollTimeFileTailsSlice{data: fs, pattern: rwt.timePattern}
diff --git a/writers_rollingfilewriter_test.go b/writers_rollingfilewriter_test.go
index 1da50a1..df8a94b 100644
--- a/writers_rollingfilewriter_test.go
+++ b/writers_rollingfilewriter_test.go
@@ -61,6 +61,16 @@ func createRollingDatefileWriterTestCase(
 	return &fileWriterTestCase{files, fileName, rollingTypeTime, 0, 0, datePattern, writeCount, resFiles, nameMode, archiveType, archiveExploded, archivePath}
 }
 
+func TestShouldArchiveWithTar(t *testing.T) {
+	compressionType := compressionTypes[rollingArchiveGzip]
+
+	archiveName := compressionType.rollingArchiveTypeName("log", false)
+
+	if archiveName != "log.tar.gz" {
+		t.Fatalf("archive name should be log.tar.gz but got %v", archiveName)
+	}
+}
+
 func TestRollingFileWriter(t *testing.T) {
 	t.Logf("Starting rolling file writer tests")
 	NewFileWriterTester(rollingfileWriterTests, rollingFileWriterGetter, t).test()