Commit f7f0f08c authored by Jacob Vosmaer

Merge branch 'dynamic-img-resizing-add-logging' into 'master'

Add additional logging for dynamic img resizing

See merge request gitlab-org/gitlab-workhorse!562
parents fb1c84c8 337a4220
......@@ -33,11 +33,16 @@ var SendScaledImage = &resizer{"send-scaled-img:"}
// resizeParams holds the pre-authorized resize instructions decoded from
// the "send-scaled-img:" injecter payload.
type resizeParams struct {
	// Location is a local file path or an http(s) URL of the source image.
	Location string
	// Width is the target width in pixels; presumably height scales to keep
	// the aspect ratio — confirm against the resize command (not visible here).
	Width uint
	// Format names the image format; in the visible code it is only used
	// as a field in the success log entry.
	Format string
}
// maxImageScalerProcs caps how many scaler processes may run concurrently.
// NOTE(review): the diff fused the removed declarations (`var numScalerProcs
// int32 = 0` and a duplicate const) onto the added ones; this is the merged
// state — one const, one counter type, one counter variable.
const maxImageScalerProcs = 100

// processCounter tracks the number of currently running scaler processes.
// The n field must only be manipulated through sync/atomic operations.
type processCounter struct {
	n int32
}

// numScalerProcs enforces the global concurrent-scaler limit.
var numScalerProcs processCounter
// Images might be located remotely in object storage, in which case we need to stream
// it via http(s)
......@@ -58,20 +63,37 @@ var httpClient = &http.Client{
Transport: httpTransport,
}
var imageResizeConcurrencyMax = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "gitlab_workhorse_max_image_resize_requests_exceeded_total",
Help: "Amount of image resizing requests that exceed the maximum allowed scaler processes",
},
var (
imageResizeConcurrencyLimitExceeds = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "gitlab_workhorse_image_resize_concurrency_limit_exceeds_total",
Help: "Amount of image resizing requests that exceeded the maximum allowed scaler processes",
},
)
imageResizeProcesses = prometheus.NewGauge(
prometheus.GaugeOpts{
Name: "gitlab_workhorse_image_resize_processes",
Help: "Amount of image resizing scaler processes working now",
},
)
imageResizeCompleted = prometheus.NewCounter(
prometheus.CounterOpts{
Name: "gitlab_workhorse_image_resize_completed_total",
Help: "Amount of image resizing processes sucessfully completed",
},
)
)
// init registers the resizer's Prometheus metrics with the default registry.
// The stale registration of the removed imageResizeConcurrencyMax metric
// (a leftover deleted diff line) is dropped; MustRegister is variadic, so a
// single call registers all three collectors.
func init() {
	prometheus.MustRegister(
		imageResizeConcurrencyLimitExceeds,
		imageResizeProcesses,
		imageResizeCompleted,
	)
}
// This Injecter forks into graphicsmagick to resize an image identified by path or URL
// and streams the resized image back to the client
// NOTE(review): this span is a diff-rendered fragment — the "......@@" lines
// mark elided context, and some adjacent lines are the removed/added pair of
// the same statement. Do not treat it as compilable source.
func (r *resizer) Inject(w http.ResponseWriter, req *http.Request, paramsData string) {
// start anchors the duration_s field reported in the success log below.
start := time.Now()
logger := log.ContextLogger(req.Context())
params, err := r.unpackParameters(paramsData)
if err != nil {
......@@ -81,7 +103,7 @@ func (r *resizer) Inject(w http.ResponseWriter, req *http.Request, paramsData st
return
}
sourceImageReader, err := openSourceImage(params.Location)
// New form also returns the source size so it can be logged on success.
sourceImageReader, filesize, err := openSourceImage(params.Location)
if err != nil {
// This means we cannot even read the input image; fail fast.
helper.Fail500(w, req, fmt.Errorf("ImageResizer: Failed opening image data stream: %v", err))
......@@ -93,6 +115,7 @@ func (r *resizer) Inject(w http.ResponseWriter, req *http.Request, paramsData st
// simply fail over to rendering out the original image unchanged.
imageReader, resizeCmd := tryResizeImage(req.Context(), sourceImageReader, params.Width, logger)
defer helper.CleanUpProcessGroup(resizeCmd)
// NOTE(review): "completed" is incremented before the bytes are copied to
// the client — presumably counting requests that started streaming; confirm.
imageResizeCompleted.Inc()
w.Header().Del("Content-Length")
bytesWritten, err := io.Copy(w, imageReader)
......@@ -101,7 +124,13 @@ func (r *resizer) Inject(w http.ResponseWriter, req *http.Request, paramsData st
return
}
logger.WithField("bytes_written", bytesWritten).Print("ImageResizer: success")
logger.WithFields(log.Fields{
"bytes_written": bytesWritten,
"duration_s": time.Since(start).Seconds(),
"target_width": params.Width,
"format": params.Format,
"original_filesize": filesize,
}).Printf("ImageResizer: Success")
}
func (r *resizer) unpackParameters(paramsData string) (*resizeParams, error) {
......@@ -119,17 +148,13 @@ func (r *resizer) unpackParameters(paramsData string) (*resizeParams, error) {
// Attempts to rescale the given image data, or in case of errors, falls back to the original image.
// NOTE(review): diff fragment — the closing brace and remaining body are
// elided after the startResizeImageCommand call, and the removed atomic
// counter lines appear alongside their processCounter replacements.
func tryResizeImage(ctx context.Context, r io.Reader, width uint, logger *logrus.Entry) (io.Reader, *exec.Cmd) {
// Only allow more scaling requests if we haven't yet reached the maximum allows number
// of concurrent graphicsmagick processes
if n := atomic.AddInt32(&numScalerProcs, 1); n > maxImageScalerProcs {
atomic.AddInt32(&numScalerProcs, -1)
imageResizeConcurrencyMax.Inc()
if !numScalerProcs.tryIncrement() {
// Limit reached: hand back the unresized stream with no child process.
return r, nil
}
// Release the claimed process slot once the request context is done.
go func() {
<-ctx.Done()
atomic.AddInt32(&numScalerProcs, -1)
numScalerProcs.decrement()
}()
resizeCmd, resizedImageReader, err := startResizeImageCommand(ctx, r, logger.Writer(), width)
......@@ -162,22 +187,60 @@ func isURL(location string) bool {
return strings.HasPrefix(location, "http://") || strings.HasPrefix(location, "https://")
}
func openSourceImage(location string) (io.ReadCloser, error) {
if !isURL(location) {
return os.Open(location)
func openSourceImage(location string) (io.ReadCloser, int64, error) {
if isURL(location) {
return openFromUrl(location)
}
return openFromFile(location)
}
func openFromUrl(location string) (io.ReadCloser, int64, error) {
res, err := httpClient.Get(location)
if err != nil {
return nil, err
return nil, 0, err
}
if res.StatusCode != http.StatusOK {
res.Body.Close()
return nil, fmt.Errorf("ImageResizer: cannot read data from %q: %d %s",
return nil, 0, fmt.Errorf("ImageResizer: cannot read data from %q: %d %s",
location, res.StatusCode, res.Status)
}
return res.Body, nil
return res.Body, res.ContentLength, nil
}
func openFromFile(location string) (io.ReadCloser, int64, error) {
file, err := os.Open(location)
if err != nil {
return file, 0, err
}
fi, err := file.Stat()
if err != nil {
return file, 0, err
}
return file, fi.Size(), nil
}
// Only allow more scaling requests if we haven't yet reached the maximum
// allowed number of concurrent scaler processes. Returns false (and bumps
// the rejection counter) when the limit is already reached.
func (c *processCounter) tryIncrement() bool {
	// Claim a slot atomically; p is the new count including this request.
	p := atomic.AddInt32(&c.n, 1)
	if p > maxImageScalerProcs {
		// Over the limit: give the slot back and count the rejection.
		c.decrement()
		imageResizeConcurrencyLimitExceeds.Inc()
		return false
	}
	// Publish the atomically observed value p; reading c.n directly here
	// (as the original did) races with concurrent increments/decrements.
	imageResizeProcesses.Set(float64(p))
	return true
}
// decrement releases one scaler-process slot and publishes the atomically
// observed new count to the gauge. Using the AddInt32 return value avoids
// the plain (racy) read of c.n the original performed.
func (c *processCounter) decrement() {
	imageResizeProcesses.Set(float64(atomic.AddInt32(&c.n, -1)))
}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment