type PayloadCollection struct {
    WindowsVersion string    `json:"version"`
    Token          string    `json:"token"`
    Payloads       []Payload `json:"data"`
}
type Payload struct {
    // [redacted]
}
func (p *Payload) UploadToS3() error {
    // the storageFolder method ensures that there are no name collisions in
    // case we get the same timestamp in the key name
    storagePath := fmt.Sprintf("%v/%v", p.storageFolder, time.Now().UnixNano())

    bucket := S3Bucket

    b := new(bytes.Buffer)
    if encodeErr := json.NewEncoder(b).Encode(p); encodeErr != nil {
        return encodeErr
    }

    // Everything we post to the S3 bucket should be marked 'private'
    var acl = s3.Private
    var contentType = "application/octet-stream"

    return bucket.PutReader(storagePath, b, int64(b.Len()), contentType, acl, s3.Options{})
}
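The uploader above references a few identifiers declared elsewhere in the program. Purely as an assumption about that surrounding code, here is a minimal sketch of what they might look like using the AdRoll fork of goamz (the credentials, region, bucket name, and size limit are all illustrative, not from the original program):

import (
    "github.com/AdRoll/goamz/aws"
    "github.com/AdRoll/goamz/s3"
)

var (
    // S3Bucket is the bucket every payload gets uploaded into (illustrative).
    S3Bucket = s3.New(aws.Auth{AccessKey: "...", SecretKey: "..."}, aws.USEast).Bucket("payload-bucket")

    // MaxLength caps how much of the request body payloadHandler will decode (illustrative value).
    MaxLength int64 = 1048576 // 1 MB
)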
func payloadHandler(w http.ResponseWriter, r *http.Request) {

    if r.Method != "POST" {
        w.WriteHeader(http.StatusMethodNotAllowed)
        return
    }

    // Read the body into a string for json decoding
    var content = &PayloadCollection{}
    err := json.NewDecoder(io.LimitReader(r.Body, MaxLength)).Decode(content)
    if err != nil {
        w.Header().Set("Content-Type", "application/json; charset=UTF-8")
        w.WriteHeader(http.StatusBadRequest)
        return
    }

    // Go through each payload and queue items individually to be posted to S3
    for _, payload := range content.Payloads {
        go payload.UploadToS3() // <----- DON'T DO THIS
    }

    w.WriteHeader(http.StatusOK)
}
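The `go payload.UploadToS3()` call above has no back-pressure: every request spawns goroutines as fast as payloads arrive, so under sustained load the number of in-flight uploads (and the memory they hold) grows without bound. Before reaching for the full worker pool below, the simplest way to cap concurrency is a buffered channel used as a counting semaphore. A minimal sketch (the `sem` variable and its size of 100 are illustrative, not part of the original program):

// sem limits us to at most 100 concurrent uploads.
var sem = make(chan struct{}, 100)

for _, payload := range content.Payloads {
    sem <- struct{}{} // blocks once 100 uploads are in flight
    go func(p Payload) {
        defer func() { <-sem }() // release the slot when done
        p.UploadToS3()
    }(payload)
}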
var (
    // MAX_WORKERS and MAX_QUEUE arrive as strings from the environment,
    // so parse them into ints before using them to size the pool and queue.
    MaxWorker, _ = strconv.Atoi(os.Getenv("MAX_WORKERS"))
    MaxQueue, _  = strconv.Atoi(os.Getenv("MAX_QUEUE"))
)
// Job represents the job to be run
type Job struct {
    Payload Payload
}

// A buffered channel that we can send work requests on.
var JobQueue chan Job

// Worker represents the worker that executes the job
type Worker struct {
    WorkerPool chan chan Job
    JobChannel chan Job
    quit       chan bool
}
func NewWorker(workerPool chan chan Job) Worker {
    return Worker{
        WorkerPool: workerPool,
        JobChannel: make(chan Job),
        quit:       make(chan bool),
    }
}
// Start method starts the run loop for the worker, listening for a quit channel in
// case we need to stop it
func (w Worker) Start() {
    go func() {
        for {
            // register the current worker into the worker queue.
            w.WorkerPool <- w.JobChannel

            select {
            case job := <-w.JobChannel:
                // we have received a work request.
                if err := job.Payload.UploadToS3(); err != nil {
                    log.Errorf("Error uploading to S3: %s", err.Error())
                }

            case <-w.quit:
                // we have received a signal to stop
                return
            }
        }
    }()
}
// Stop signals the worker to stop listening for work requests.
func (w Worker) Stop() {
    go func() {
        w.quit <- true
    }()
}
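In isolation, a worker's lifecycle looks like the following hypothetical sketch (the pool size of 1 is illustrative). Note that `Start` first registers the worker's `JobChannel` into the pool, so the pool must have room before the run loop reaches its `select`:

pool := make(chan chan Job, 1)
worker := NewWorker(pool)
worker.Start() // run loop registers JobChannel into pool, then blocks in select
// ... later, during shutdown:
worker.Stop() // run loop returns the next time it reaches the select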
func payloadHandler(w http.ResponseWriter, r *http.Request) {

    if r.Method != "POST" {
        w.WriteHeader(http.StatusMethodNotAllowed)
        return
    }

    // Read the body into a string for json decoding
    var content = &PayloadCollection{}
    err := json.NewDecoder(io.LimitReader(r.Body, MaxLength)).Decode(content)
    if err != nil {
        w.Header().Set("Content-Type", "application/json; charset=UTF-8")
        w.WriteHeader(http.StatusBadRequest)
        return
    }

    // Go through each payload and queue items individually to be posted to S3
    for _, payload := range content.Payloads {

        // let's create a job with the payload
        work := Job{Payload: payload}

        // Push the work onto the queue.
        JobQueue <- work
    }

    w.WriteHeader(http.StatusOK)
}
type Dispatcher struct {
    // A pool of workers channels that are registered with the dispatcher
    WorkerPool chan chan Job
    maxWorkers int
}

func NewDispatcher(maxWorkers int) *Dispatcher {
    pool := make(chan chan Job, maxWorkers)
    return &Dispatcher{WorkerPool: pool, maxWorkers: maxWorkers}
}

func (d *Dispatcher) Run() {
    // starting n number of workers
    for i := 0; i < d.maxWorkers; i++ {
        worker := NewWorker(d.WorkerPool)
        worker.Start()
    }

    go d.dispatch()
}

func (d *Dispatcher) dispatch() {
    for {
        select {
        case job := <-JobQueue:
            // a job request has been received
            go func(job Job) {
                // try to obtain a worker job channel that is available.
                // this will block until a worker is idle
                jobChannel := <-d.WorkerPool

                // dispatch the job to the worker job channel
                jobChannel <- job
            }(job)
        }
    }
}
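What remains is to create the JobQueue and start the dispatcher before the HTTP server begins accepting traffic. A minimal wiring sketch, assuming the parsed MaxWorker and MaxQueue values from earlier (the route and listen address are illustrative):

func main() {
    // The global queue the handler pushes onto and the dispatcher drains.
    JobQueue = make(chan Job, MaxQueue)

    // Spin up the worker pool and start dispatching jobs to idle workers.
    dispatcher := NewDispatcher(MaxWorker)
    dispatcher.Run()

    http.HandleFunc("/payload", payloadHandler)
    log.Fatal(http.ListenAndServe(":8080", nil))
}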