牧羊人nacy
I think the example below should point you in the right direction. It is about the simplest example of compressing and decompressing with the github.com/pierrec/lz4 package.

// compress project main.go
package main

import (
    "fmt"

    "github.com/pierrec/lz4"
)

var fileContent = `CompressBlock compresses the source buffer starting at soffet into the destination one.
This is the fast version of LZ4 compression and also the default one.
The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible.
An error is returned if the destination buffer is too small.`

func main() {
    toCompress := []byte(fileContent)
    compressed := make([]byte, len(toCompress))

    // compress
    l, err := lz4.CompressBlock(toCompress, compressed, 0)
    if err != nil {
        panic(err)
    }
    fmt.Println("compressed Data:", string(compressed[:l]))

    // decompress
    decompressed := make([]byte, len(toCompress))
    l, err = lz4.UncompressBlock(compressed[:l], decompressed, 0)
    if err != nil {
        panic(err)
    }
    fmt.Println("\ndecompressed Data:", string(decompressed[:l]))
}
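One caveat with the block API as used above: the destination buffer is sized to len(toCompress), but the quoted documentation itself warns that an error is returned if the destination is too small, which can happen for incompressible input. A minimal sketch of safer sizing, assuming the package's lz4.CompressBlockBound helper and the same legacy three-argument CompressBlock signature used above (newer major versions of the package have changed this signature):

package main

import (
    "fmt"

    "github.com/pierrec/lz4"
)

func main() {
    src := []byte("some data that may or may not compress well")

    // Size the destination with CompressBlockBound so the block API
    // cannot fail with a too-small destination buffer.
    dst := make([]byte, lz4.CompressBlockBound(len(src)))

    n, err := lz4.CompressBlock(src, dst, 0)
    if err != nil {
        panic(err)
    }
    if n == 0 {
        fmt.Println("data is incompressible; store it uncompressed")
        return
    }
    fmt.Println("compressed size:", n)
}

As the quoted documentation says, a return value of 0 with no error means the data did not shrink and should be stored uncompressed.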
慕森卡
With the bufio package you can (de)compress files without loading the whole file into memory at once. In practice this lets you (de)compress files larger than your system's available memory, which may or may not matter for your particular case. If it does, here is a working example:

package main

import (
    "bufio"
    "io"
    "os"

    "github.com/pierrec/lz4"
)

// Compress a file, then decompress it again!
func main() {
    compress("./compress-me.txt", "./compressed.txt")
    decompress("./compressed.txt", "./decompressed.txt")
}

func compress(inputFile, outputFile string) {
    // open input file
    fin, err := os.Open(inputFile)
    if err != nil {
        panic(err)
    }
    defer func() {
        if err := fin.Close(); err != nil {
            panic(err)
        }
    }()

    // make a read buffer
    r := bufio.NewReader(fin)

    // open output file
    fout, err := os.Create(outputFile)
    if err != nil {
        panic(err)
    }
    defer func() {
        if err := fout.Close(); err != nil {
            panic(err)
        }
    }()

    // make an lz4 write buffer
    w := lz4.NewWriter(fout)

    // make a buffer to keep chunks that are read
    buf := make([]byte, 1024)
    for {
        // read a chunk
        n, err := r.Read(buf)
        if err != nil && err != io.EOF {
            panic(err)
        }
        if n == 0 {
            break
        }

        // write a chunk
        if _, err := w.Write(buf[:n]); err != nil {
            panic(err)
        }
    }

    if err = w.Flush(); err != nil {
        panic(err)
    }
}

func decompress(inputFile, outputFile string) {
    // open input file
    fin, err := os.Open(inputFile)
    if err != nil {
        panic(err)
    }
    defer func() {
        if err := fin.Close(); err != nil {
            panic(err)
        }
    }()

    // make an lz4 read buffer
    r := lz4.NewReader(fin)

    // open output file
    fout, err := os.Create(outputFile)
    if err != nil {
        panic(err)
    }
    defer func() {
        if err := fout.Close(); err != nil {
            panic(err)
        }
    }()

    // make a write buffer
    w := bufio.NewWriter(fout)

    // make a buffer to keep chunks that are read
    buf := make([]byte, 1024)
    for {
        // read a chunk
        n, err := r.Read(buf)
        if err != nil && err != io.EOF {
            panic(err)
        }
        if n == 0 {
            break
        }

        // write a chunk
        if _, err := w.Write(buf[:n]); err != nil {
            panic(err)
        }
    }

    if err = w.Flush(); err != nil {
        panic(err)
    }
}
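Since lz4.Writer and lz4.Reader satisfy io.Writer and io.Reader, the manual chunk loop above can also be replaced with io.Copy, which does the buffered streaming for you. The sketch below shows compression only; the function name compressFile and the file paths are just illustrative, and the call to Close assumes the package's Writer implements io.WriteCloser (true in the versions I have seen, but check your release):

package main

import (
    "io"
    "os"

    "github.com/pierrec/lz4"
)

// compressFile streams src into dst through an lz4.Writer using io.Copy,
// so only a small internal buffer is held in memory at any time.
func compressFile(src, dst string) error {
    fin, err := os.Open(src)
    if err != nil {
        return err
    }
    defer fin.Close()

    fout, err := os.Create(dst)
    if err != nil {
        return err
    }
    defer fout.Close()

    zw := lz4.NewWriter(fout)
    if _, err := io.Copy(zw, fin); err != nil {
        return err
    }
    // Close flushes any buffered data and finalizes the lz4 stream.
    return zw.Close()
}

func main() {
    if err := compressFile("./compress-me.txt", "./compressed.txt"); err != nil {
        panic(err)
    }
}

Decompression is symmetrical: wrap the input file in lz4.NewReader and io.Copy it into the output file.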
Cats萌萌
The code below gave me the result I expected. I took it from this file: https://github.com/pierrec/lz4/blob/master/lz4c/main.go. The file to process is given as a command-line argument, and it compresses/decompresses successfully.

package main

import (
    // "bytes"
    "flag"
    "fmt"
    "io"
    "log"
    "os"
    "path"
    "runtime"
    "strings"

    "github.com/pierrec/lz4"
)

func main() {
    // Process command line arguments
    var (
        blockMaxSizeDefault = 4 << 20
        flagStdout          = flag.Bool("c", false, "output to stdout")
        flagDecompress      = flag.Bool("d", false, "decompress flag")
        flagBlockMaxSize    = flag.Int("B", blockMaxSizeDefault, "block max size [64Kb,256Kb,1Mb,4Mb]")
        flagBlockDependency = flag.Bool("BD", false, "enable block dependency")
        flagBlockChecksum   = flag.Bool("BX", false, "enable block checksum")
        flagStreamChecksum  = flag.Bool("Sx", false, "disable stream checksum")
        flagHighCompression = flag.Bool("9", false, "enabled high compression")
    )
    flag.Usage = func() {
        fmt.Fprintf(os.Stderr, "Usage:\n\t%s [arg] [input]...\n\tNo input means [de]compress stdin to stdout\n\n", os.Args[0])
        flag.PrintDefaults()
    }
    flag.Parse()
    fmt.Println("output to stdout ", *flagStdout)
    fmt.Println("Decompress", *flagDecompress)

    // Use all CPUs
    runtime.GOMAXPROCS(runtime.NumCPU())

    zr := lz4.NewReader(nil)
    zw := lz4.NewWriter(nil)
    zh := lz4.Header{
        BlockDependency: *flagBlockDependency,
        BlockChecksum:   *flagBlockChecksum,
        BlockMaxSize:    *flagBlockMaxSize,
        NoChecksum:      *flagStreamChecksum,
        HighCompression: *flagHighCompression,
    }

    worker := func(in io.Reader, out io.Writer) {
        if *flagDecompress {
            fmt.Println("\n Decompressing the data")
            zr.Reset(in)
            if _, err := io.Copy(out, zr); err != nil {
                log.Fatalf("Error while decompressing input: %v", err)
            }
        } else {
            zw.Reset(out)
            zw.Header = zh
            if _, err := io.Copy(zw, in); err != nil {
                log.Fatalf("Error while compressing input: %v", err)
            }
        }
    }

    // No input means [de]compress stdin to stdout
    if len(flag.Args()) == 0 {
        worker(os.Stdin, os.Stdout)
        os.Exit(0)
    }

    // Compress or decompress all input files
    for _, inputFileName := range flag.Args() {
        outputFileName := path.Clean(inputFileName)

        if !*flagStdout {
            if *flagDecompress {
                outputFileName = strings.TrimSuffix(outputFileName, lz4.Extension)
                if outputFileName == inputFileName {
                    log.Fatalf("Invalid output file name: same as input: %s", inputFileName)
                }
            } else {
                outputFileName += lz4.Extension
            }
        }

        inputFile, err := os.Open(inputFileName)
        if err != nil {
            log.Fatalf("Error while opening input: %v", err)
        }

        outputFile := os.Stdout
        if !*flagStdout {
            outputFile, err = os.Create(outputFileName)
            if err != nil {
                log.Fatalf("Error while opening output: %v", err)
            }
        }
        worker(inputFile, outputFile)

        inputFile.Close()
        if !*flagStdout {
            outputFile.Close()
        }
    }
}

Sample invocation: go run compress.go -9=true sample.txt
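If you only need the effect of the -9 flag and not the whole CLI, the Header configuration from the code above can be set directly on a writer. A minimal sketch, assuming the same legacy Header fields (HighCompression, BlockMaxSize) that appear in that code; newer major versions of the package configure the writer differently:

package main

import (
    "io"
    "os"

    "github.com/pierrec/lz4"
)

// Compress stdin to stdout with high compression and 4 MB blocks,
// mirroring the -9 and -B options of the command-line tool above.
func main() {
    zw := lz4.NewWriter(os.Stdout)
    zw.Header = lz4.Header{
        BlockMaxSize:    4 << 20,
        HighCompression: true,
    }
    if _, err := io.Copy(zw, os.Stdin); err != nil {
        panic(err)
    }
    if err := zw.Close(); err != nil {
        panic(err)
    }
}

You would run it along the lines of: go run main.go < sample.txt > sample.txt.lz4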