My Google App Engine Go project creates a zip of multiple files that live in a "folder" in Google Cloud Storage. It used to be very fast when it was implemented in the BlobStore using the now deprecated and removed Files API. I recently converted the code to use Google Cloud Storage, and now the performance is very poor and it sometimes times out. The files being zipped are between 1K and 2M in size.
I'm looking for any advice on how to improve zipping the file contents. The code below is what I wrote to pack multiple files in the cloud into a new zip file, also in the cloud. It can take a long time to execute, and it has to load the entire contents of each file into memory before writing it to the zip (see the performance note in the code below). There must be a better way. A sketch of the streaming approach I have in mind follows the code.
// Pack a folder into zip file
func (cloud *Cloud) Pack(srcFolder string, fileName string, contentType string, metaData *map[string]string) {
	log.Infof(cloud.c, "Packing bucket %v folder %v to file %v", cloud.bucket, srcFolder, fileName)
	srcFolder = fmt.Sprintf("%v/", srcFolder)
	query := &storage.Query{Prefix: srcFolder, Delimiter: "/"}
	objs, err := storage.ListObjects(cloud.ctx, cloud.bucket, query)
	if err != nil {
		log.Errorf(cloud.c, "Packing failed to list bucket %q: %v", cloud.bucket, err)
		return
	}
	totalFiles := len(objs.Results)
	if totalFiles == 0 {
		log.Errorf(cloud.c, "Packing found no objects in bucket %q folder %q", cloud.bucket, srcFolder)
		return
	}
	// create storage file for writing
	log.Infof(cloud.c, "Writing new zip file to %v/%v for %v files", cloud.bucket, fileName, totalFiles)
	storageWriter := storage.NewWriter(cloud.ctx, cloud.bucket, fileName)
	// add optional content type and meta data
	if len(contentType) > 0 { storageWriter.ContentType = contentType }
	if metaData != nil { storageWriter.Metadata = *metaData }
	// Create a buffer to write our archive to.
	buf := new(bytes.Buffer)
	// Create a new zip archive to memory buffer
	zipWriter := zip.NewWriter(buf)
	// go through each file in the folder
	for _, obj := range objs.Results {
		log.Infof(cloud.c, "Packing file %v of size %v to zip file", obj.Name, obj.Size)
		//d.dumpStats(obj)
		// The snippet was cut off here; the rest of the loop is a reconstruction
		// of the pattern described above: read the whole object into memory,
		// then write those bytes into the zip entry.
		reader, err := storage.NewReader(cloud.ctx, cloud.bucket, obj.Name)
		if err != nil {
			log.Errorf(cloud.c, "Packing failed to read %q: %v", obj.Name, err)
			return
		}
		contents, err := ioutil.ReadAll(reader) // performance problem: entire file held in memory
		reader.Close()
		if err != nil {
			log.Errorf(cloud.c, "Packing failed to read %q: %v", obj.Name, err)
			return
		}
		zipFile, err := zipWriter.Create(filepath.Base(obj.Name))
		if err == nil {
			_, err = zipFile.Write(contents)
		}
		if err != nil {
			log.Errorf(cloud.c, "Packing failed to zip %q: %v", obj.Name, err)
			return
		}
	}
	// finish the zip, then copy the whole in-memory archive to Cloud Storage
	zipWriter.Close()
	storageWriter.Write(buf.Bytes())
	storageWriter.Close()
}
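For comparison, here is a rough sketch of the streaming approach I have in mind: hand zip.NewWriter the Cloud Storage writer directly, and pipe each object in with io.Copy, so that neither the archive nor any single file is ever buffered whole in memory. This is untested; it reuses my Cloud type and the same deprecated storage package as above, and PackStreaming is just a name I made up for the sketch.

// PackStreaming is an untested sketch (not my current code): it streams
// each object straight into the zip and the zip straight into Cloud
// Storage, so nothing is buffered whole in memory. It assumes the same
// imports as the code above, plus "io" and "path/filepath".
func (cloud *Cloud) PackStreaming(srcFolder string, fileName string) error {
	query := &storage.Query{Prefix: srcFolder + "/", Delimiter: "/"}
	objs, err := storage.ListObjects(cloud.ctx, cloud.bucket, query)
	if err != nil {
		return err
	}
	storageWriter := storage.NewWriter(cloud.ctx, cloud.bucket, fileName)
	zipWriter := zip.NewWriter(storageWriter) // zip bytes flow straight to GCS
	for _, obj := range objs.Results {
		zipFile, err := zipWriter.Create(filepath.Base(obj.Name))
		if err != nil {
			return err
		}
		reader, err := storage.NewReader(cloud.ctx, cloud.bucket, obj.Name)
		if err != nil {
			return err
		}
		_, err = io.Copy(zipFile, reader) // stream; no ReadAll, no big buffer
		reader.Close()
		if err != nil {
			return err
		}
	}
	if err := zipWriter.Close(); err != nil { // writes the zip central directory
		return err
	}
	return storageWriter.Close() // finalizes the object in Cloud Storage
}

Is this the right direction, or is the per-object cost of storage.NewReader the real bottleneck here?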