package main

// TODO: use bookpipeline package to do aws stuff

import (
	"flag"
	"fmt"
	"log"
	"os"
	"path/filepath"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/aws/aws-sdk-go/service/sqs"
)

const usage = `Usage: booktopipeline [-prebinarised] [-v] bookdir [bookname]
Uploads the book in bookdir to the S3 'inprogress' bucket and adds it
to the 'preprocess' SQS queue, or the 'wipeonly' queue if the
prebinarised flag is set.
If bookname is omitted the last part of the bookdir is used.
`
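
// Example invocation (the book directory path is purely illustrative):
//   booktopipeline -v path/to/MyBook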

// null writer to enable non-verbose logging to be discarded
type NullWriter bool

func (w NullWriter) Write(p []byte) (n int, err error) {
	return len(p), nil
}

var verboselog *log.Logger
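
// fileWalk is a channel of file paths; its Walk method has the
// filepath.WalkFunc signature, so walked paths can be streamed to
// the uploader as they are found.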
type fileWalk chan string

func (f fileWalk) Walk(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if !info.IsDir() {
		f <- path
	}
	return nil
}

func main() {
	verbose := flag.Bool("v", false, "Verbose")
	wipeonly := flag.Bool("prebinarised", false, "Prebinarised: only preprocessing will be to wipe")
	flag.Usage = func() {
		fmt.Fprintf(flag.CommandLine.Output(), usage)
		flag.PrintDefaults()
	}
	flag.Parse()
	if flag.NArg() < 1 {
		flag.Usage()
		return
	}

	bookdir := flag.Arg(0)
	var bookname string
	if flag.NArg() > 1 {
		bookname = flag.Arg(1)
	} else {
		bookname = filepath.Base(bookdir)
	}

	if *verbose {
		verboselog = log.New(os.Stdout, "", log.LstdFlags)
	} else {
		var n NullWriter
		verboselog = log.New(n, "", log.LstdFlags)
	}
verboselog.Println("Setting up AWS session")
sess, err := session.NewSession(&aws.Config{
Region: aws.String("eu-west-2"),
})
if err != nil {
log.Fatalln("Error: failed to set up aws session:", err)
}
sqssvc := sqs.New(sess)
uploader := s3manager.NewUploader(sess)
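
	// Choose the SQS queue for the book: prebinarised books only need a
	// wipe pass, everything else goes through full preprocessing.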
	var qname string
	if *wipeonly {
		qname = "rescribewipeonly"
	} else {
		qname = "rescribepreprocess"
	}

	verboselog.Println("Getting Queue URL for", qname)
	result, err := sqssvc.GetQueueUrl(&sqs.GetQueueUrlInput{
		QueueName: aws.String(qname),
	})
	if err != nil {
		log.Fatalln("Error getting queue URL for", qname, ":", err)
	}
	qurl := *result.QueueUrl

	// concurrent walking upload based on example at
	// https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/sdk-utilities.html
	verboselog.Println("Walking", bookdir)
	walker := make(fileWalk)
	go func() {
		err = filepath.Walk(bookdir, walker.Walk)
		if err != nil {
			log.Fatalln("Filesystem walk failed:", err)
		}
		close(walker)
	}()

	for path := range walker {
		verboselog.Println("Uploading", path)
		name := filepath.Base(path)
		file, err := os.Open(path)
		if err != nil {
			log.Fatalln("Open file", path, "failed:", err)
		}
		_, err = uploader.Upload(&s3manager.UploadInput{
			Bucket: aws.String("rescribeinprogress"),
			Key:    aws.String(filepath.Join(bookname, name)),
			Body:   file,
		})
		if err != nil {
			log.Fatalln("Failed to upload", path, err)
		}
		// Close the file explicitly here rather than deferring, so we
		// don't end up with too many open files, which can cause
		// os.Open to fail.
		err = file.Close()
		if err != nil {
			log.Fatalln("Failed to close file", path, err)
		}
	}
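
	// All images are uploaded; notify the pipeline by queueing the book name.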
verboselog.Println("Sending message", bookname, "to queue", qurl)
_, err = sqssvc.SendMessage(&sqs.SendMessageInput{
MessageBody: aws.String(bookname),
QueueUrl: &qurl,
})
if err != nil {
log.Fatalln("Error adding book to queue:", err)
}
}