Merge pull request #19060 from hashicorp/f-stx-etx

backend/remote: update `go-tfe` to support better log polling
Sander van Harmelen 2018-10-11 23:11:52 +02:00 committed by GitHub
commit 7b55d1640e
4 changed files with 54 additions and 16 deletions


@@ -165,6 +165,9 @@ func (b *Remote) opApply(stopCtx, cancelCtx context.Context, op *backend.Operati
scanner := bufio.NewScanner(logs)
for scanner.Scan() {
if scanner.Text() == "\x02" || scanner.Text() == "\x03" {
continue
}
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(scanner.Text()))
}


@@ -208,6 +208,9 @@ func (b *Remote) plan(stopCtx, cancelCtx context.Context, op *backend.Operation,
scanner := bufio.NewScanner(logs)
for scanner.Scan() {
if scanner.Text() == "\x02" || scanner.Text() == "\x03" {
continue
}
if b.CLI != nil {
b.CLI.Output(b.Colorize().Color(scanner.Text()))
}
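
Both the apply and plan loops now skip the ASCII STX (0x02) and ETX (0x03) control characters that frame the remote log stream, so the markers never reach the terminal. Below is a minimal, standalone sketch of the same filtering pattern; the printLogs helper and the sample stream are illustrative, not part of this change:

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// printLogs copies a log stream to stdout while skipping the STX/ETX
// control characters that frame the stream.
func printLogs(logs io.Reader) error {
	scanner := bufio.NewScanner(logs)
	for scanner.Scan() {
		// Lines holding only STX (start of text) or ETX (end of text)
		// are framing markers, not log output, so they are dropped.
		if scanner.Text() == "\x02" || scanner.Text() == "\x03" {
			continue
		}
		fmt.Println(scanner.Text())
	}
	return scanner.Err()
}

func main() {
	// Illustrative stream: STX, two log lines, ETX.
	stream := strings.NewReader("\x02\nInitializing plan...\nPlan: 1 to add.\n\x03\n")
	if err := printLogs(stream); err != nil {
		fmt.Println("error reading logs:", err)
	}
}

The real backend code additionally routes each line through b.CLI and b.Colorize(), which are left out here for brevity.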


@@ -1,9 +1,11 @@
package tfe
import (
"bytes"
"context"
"fmt"
"io"
"math"
"net/http"
"net/url"
"time"
@@ -11,11 +13,24 @@ import (
// LogReader implements io.Reader for streaming logs.
type LogReader struct {
client *Client
ctx context.Context
done func() (bool, error)
logURL *url.URL
offset int64
client *Client
ctx context.Context
done func() (bool, error)
logURL *url.URL
offset int64
reads int
startOfText bool
endOfText bool
}
// backoff will perform exponential backoff based on the iteration and
// limited by the provided min and max (in milliseconds) durations.
func backoff(min, max float64, iter int) time.Duration {
backoff := math.Pow(2, float64(iter)/5) * min
if backoff > max {
backoff = max
}
return time.Duration(backoff) * time.Millisecond
}
func (r *LogReader) Read(l []byte) (int, error) {
@@ -26,11 +41,11 @@ func (r *LogReader) Read(l []byte) (int, error) {
// Loop until we can read any data, the context is canceled or the
// run is finished. If we would return right away without any
// data, we could end up causing an io.ErrNoProgress error.
for {
for r.reads = 1; ; r.reads++ {
select {
case <-r.ctx.Done():
return 0, r.ctx.Err()
case <-time.After(500 * time.Millisecond):
case <-time.After(backoff(500, 2000, r.reads)):
if written, err := r.read(l); err != io.ErrNoProgress {
return written, err
}
@@ -70,16 +85,33 @@ func (r *LogReader) read(l []byte) (int, error) {
return written, err
}
if written > 0 {
// Check for an STX (Start of Text) ASCII control marker.
if !r.startOfText && bytes.Contains(l, []byte("\x02")) {
r.startOfText = true
}
// If we found an STX ASCII control character, start looking for
// the ETX (End of Text) control character.
if r.startOfText && bytes.Contains(l, []byte("\x03")) {
r.endOfText = true
}
}
// Check if we need to continue the loop and wait (using backoff)
// before checking if there is a new chunk available or if the
// run is finished and we are done reading all chunks.
if written == 0 {
done, err := r.done()
if err != nil {
return 0, err
}
if done {
return 0, io.EOF
if (r.startOfText && r.endOfText) || // The logstream finished without issues.
(r.startOfText && r.reads%10 == 0) || // The logstream terminated unexpectedly.
(!r.startOfText && r.reads > 1) { // The logstream doesn't support STX/ETX.
done, err := r.done()
if err != nil {
return 0, err
}
if done {
return 0, io.EOF
}
}
return 0, io.ErrNoProgress
}
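
Taken together, the go-tfe changes replace the fixed 500ms poll with exponential backoff (500ms * 2^(reads/5), capped at 2s) and only call done() when it makes sense: once both STX and ETX were seen, every tenth read after an STX with no ETX, or on every read after the first when the stream doesn't use the markers at all. The following standalone sketch shows the resulting backoff curve under those parameters; the demo loop is illustrative and not part of go-tfe:

package main

import (
	"fmt"
	"math"
	"time"
)

// backoff mirrors the helper added in this commit: the delay grows from min,
// doubling roughly every 5 iterations, and is capped at max (milliseconds).
func backoff(min, max float64, iter int) time.Duration {
	d := math.Pow(2, float64(iter)/5) * min
	if d > max {
		d = max
	}
	return time.Duration(d) * time.Millisecond
}

func main() {
	// With min=500 and max=2000 the wait starts at ~574ms and reaches the
	// 2s cap from the tenth read onwards.
	for _, reads := range []int{1, 5, 10, 20} {
		fmt.Printf("read %2d: wait %v\n", reads, backoff(500, 2000, reads))
	}
}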

vendor/vendor.json

@@ -1804,10 +1804,10 @@
"revisionTime": "2018-07-12T07:51:27Z"
},
{
"checksumSHA1": "9EZuhp7LWTAVsTDpP9DzajjmJxg=",
"checksumSHA1": "bZzpA/TNWpYzVGIFEWLpOz7AXCU=",
"path": "github.com/hashicorp/go-tfe",
"revision": "ed986a3b38aba4630ca6ae7dbc876eb0d0c95c57",
"revisionTime": "2018-10-10T13:21:10Z"
"revision": "937a37d8d40df424b1e47fe05de0548727154efc",
"revisionTime": "2018-10-11T20:03:11Z"
},
{
"checksumSHA1": "85XUnluYJL7F55ptcwdmN8eSOsk=",