From 16ea8034794ef030c969d586a7fc945bf4a2873a Mon Sep 17 00:00:00 2001
From: Nick White
Date: Mon, 1 Feb 2021 11:45:27 +0000
Subject: Ensure DeleteObjects can handle over 1000 files to delete; fixes
 rmbook for large books

---
 aws.go | 18 +++++++++++++++++-
 1 file changed, 17 insertions(+), 1 deletion(-)

diff --git a/aws.go b/aws.go
index 035b08a..65671fa 100644
--- a/aws.go
+++ b/aws.go
@@ -401,9 +401,25 @@ func (a *AwsConn) ListObjectPrefixes(bucket string) ([]string, error) {
 // Deletes a list of objects
 func (a *AwsConn) DeleteObjects(bucket string, keys []string) error {
 	objs := []*s3.ObjectIdentifier{}
-	for _, v := range keys {
+	for i, v := range keys {
 		o := s3.ObjectIdentifier{Key: aws.String(v)}
 		objs = append(objs, &o)
+		// s3.DeleteObjects can only take up to 1000 keys at a time,
+		// so if necessary delete those collected so far and empty
+		// the objs queue
+		if i % 1000 == 1 {
+			_, err := a.s3svc.DeleteObjects(&s3.DeleteObjectsInput{
+				Bucket: aws.String(bucket),
+				Delete: &s3.Delete{
+					Objects: objs,
+					Quiet: aws.Bool(true),
+				},
+			})
+			if err != nil {
+				return err
+			}
+			objs = []*s3.ObjectIdentifier{}
+		}
 	}
 	_, err := a.s3svc.DeleteObjects(&s3.DeleteObjectsInput{
 		Bucket: aws.String(bucket),
-- 
cgit v1.2.1-24-ge1ad