Commit c389d3ab authored by Nick Thomas

Merge branch 'allow-disabling-archive-cache' into 'master'

Add option to disable archive caching

See merge request gitlab-org/gitlab-workhorse!222
parents 1faea24a 1369f1d4
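
The change adds a DisableCache flag to the JSON parameters that gitlab-rails passes to Workhorse for git-archive requests. As a rough orientation before the diff, here is a minimal standalone sketch (not Workhorse code) of decoding such parameters: the field names come from the archiveParams struct changed below, but the struct is trimmed to its string/bool fields and the program around it is illustrative only.

package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// Trimmed-down mirror of archiveParams; GitalyServer and GitalyRepository are
// omitted to keep this sketch self-contained.
type archiveParams struct {
	ArchivePath   string
	ArchivePrefix string
	CommitId      string
	DisableCache  bool // new field added by this merge request
}

func main() {
	// Example params as they might appear in a git-archive send-data payload.
	// With DisableCache set, Workhorse streams the archive without writing a
	// cached copy at ArchivePath.
	raw := `{"ArchivePath":"/var/empty/my/path","ArchivePrefix":"repo-1","CommitId":"54fcc214b94e78d7a41a9a8fe6d87a5e59500e51","DisableCache":true}`

	var params archiveParams
	if err := json.Unmarshal([]byte(raw), &params); err != nil {
		log.Fatal(err)
	}

	fmt.Printf("archive %s.tar.gz, cache enabled: %v\n", params.ArchivePrefix, !params.DisableCache)
}
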
@@ -3,6 +3,7 @@ package main
 import (
 	"bytes"
 	"fmt"
+	"io/ioutil"
 	"math/rand"
 	"net"
 	"net/http"
@@ -448,19 +449,37 @@ func TestGetArchiveProxiedToGitalySuccessfully(t *testing.T) {
 	repoStorage := "default"
 	oid := "54fcc214b94e78d7a41a9a8fe6d87a5e59500e51"
 	repoRelativePath := "foo/bar.git"
-	archivePath := "my/path"
 	archivePrefix := "repo-1"
-	jsonParams := fmt.Sprintf(`{"GitalyServer":{"Address":"%s","Token":""},"GitalyRepository":{"storage_name":"%s","relative_path":"%s"},"ArchivePath":"%s","ArchivePrefix":"%s","CommitId":"%s"}`,
-		gitalyAddress, repoStorage, repoRelativePath, archivePath, archivePrefix, oid)
 	expectedBody := testhelper.GitalyGetArchiveResponseMock
 	archiveLength := len(expectedBody)
 
-	resp, body, err := doSendDataRequest("/archive.tar.gz", "git-archive", jsonParams)
-	require.NoError(t, err)
+	testCases := []struct {
+		archivePath   string
+		cacheDisabled bool
+	}{
+		{archivePath: path.Join(scratchDir, "my/path"), cacheDisabled: false},
+		{archivePath: "/var/empty/my/path", cacheDisabled: true},
+	}
 
-	assert.Equal(t, 200, resp.StatusCode, "GET %q: status code", resp.Request.URL)
-	assert.Equal(t, expectedBody, string(body), "GET %q: response body", resp.Request.URL)
-	assert.Equal(t, archiveLength, len(body), "GET %q: body size", resp.Request.URL)
+	for _, tc := range testCases {
+		jsonParams := fmt.Sprintf(`{"GitalyServer":{"Address":"%s","Token":""},"GitalyRepository":{"storage_name":"%s","relative_path":"%s"},"ArchivePath":"%s","ArchivePrefix":"%s","CommitId":"%s","DisableCache":%v}`,
+			gitalyAddress, repoStorage, repoRelativePath, tc.archivePath, archivePrefix, oid, tc.cacheDisabled)
+
+		resp, body, err := doSendDataRequest("/archive.tar.gz", "git-archive", jsonParams)
+		require.NoError(t, err)
+
+		assert.Equal(t, 200, resp.StatusCode, "GET %q: status code", resp.Request.URL)
+		assert.Equal(t, expectedBody, string(body), "GET %q: response body", resp.Request.URL)
+		assert.Equal(t, archiveLength, len(body), "GET %q: body size", resp.Request.URL)
+
+		if tc.cacheDisabled {
+			_, err := os.Stat(tc.archivePath)
+			require.True(t, os.IsNotExist(err), "expected 'does not exist', got: %v", err)
+		} else {
+			cachedArchive, err := ioutil.ReadFile(tc.archivePath)
+			require.NoError(t, err)
+			require.Equal(t, expectedBody, string(cachedArchive))
+		}
+	}
 }
 
 func TestGetArchiveProxiedToGitalyInterruptedStream(t *testing.T) {
@@ -30,6 +30,7 @@ type archiveParams struct {
 	CommitId         string
 	GitalyServer     gitaly.Server
 	GitalyRepository pb.Repository
+	DisableCache     bool
 }
 
 var (
@@ -61,32 +62,41 @@ func (a *archive) Inject(w http.ResponseWriter, r *http.Request, sendData string
 		return
 	}
 
+	cacheEnabled := !params.DisableCache
+
 	archiveFilename := path.Base(params.ArchivePath)
 
-	if cachedArchive, err := os.Open(params.ArchivePath); err == nil {
-		defer cachedArchive.Close()
-		gitArchiveCache.WithLabelValues("hit").Inc()
-		setArchiveHeaders(w, format, archiveFilename)
-		// Even if somebody deleted the cachedArchive from disk since we opened
-		// the file, Unix file semantics guarantee we can still read from the
-		// open file in this process.
-		http.ServeContent(w, r, "", time.Unix(0, 0), cachedArchive)
-		return
+	if cacheEnabled {
+		cachedArchive, err := os.Open(params.ArchivePath)
+		if err == nil {
+			defer cachedArchive.Close()
+			gitArchiveCache.WithLabelValues("hit").Inc()
+			setArchiveHeaders(w, format, archiveFilename)
+			// Even if somebody deleted the cachedArchive from disk since we opened
+			// the file, Unix file semantics guarantee we can still read from the
+			// open file in this process.
+			http.ServeContent(w, r, "", time.Unix(0, 0), cachedArchive)
+			return
+		}
 	}
 
 	gitArchiveCache.WithLabelValues("miss").Inc()
-	// We assume the tempFile has a unique name so that concurrent requests are
-	// safe. We create the tempfile in the same directory as the final cached
-	// archive we want to create so that we can use an atomic link(2) operation
-	// to finalize the cached archive.
-	tempFile, err := prepareArchiveTempfile(path.Dir(params.ArchivePath), archiveFilename)
-	if err != nil {
-		helper.Fail500(w, r, fmt.Errorf("SendArchive: create tempfile: %v", err))
-		return
+	var tempFile *os.File
+	var err error
+	if cacheEnabled {
+		// We assume the tempFile has a unique name so that concurrent requests are
+		// safe. We create the tempfile in the same directory as the final cached
+		// archive we want to create so that we can use an atomic link(2) operation
+		// to finalize the cached archive.
+		tempFile, err = prepareArchiveTempfile(path.Dir(params.ArchivePath), archiveFilename)
+		if err != nil {
+			helper.Fail500(w, r, fmt.Errorf("SendArchive: create tempfile: %v", err))
+			return
+		}
+		defer tempFile.Close()
+		defer os.Remove(tempFile.Name())
 	}
-	defer tempFile.Close()
-	defer os.Remove(tempFile.Name())
 
 	var archiveReader io.Reader
 	if params.GitalyServer.Address != "" {
@@ -103,7 +113,10 @@ func (a *archive) Inject(w http.ResponseWriter, r *http.Request, sendData string
 		return
 	}
 
-	reader := io.TeeReader(archiveReader, tempFile)
+	reader := archiveReader
+	if cacheEnabled {
+		reader = io.TeeReader(archiveReader, tempFile)
+	}
 
 	// Start writing the response
 	setArchiveHeaders(w, format, archiveFilename)
@@ -113,9 +126,12 @@ func (a *archive) Inject(w http.ResponseWriter, r *http.Request, sendData string
 		return
 	}
 
-	if err := finalizeCachedArchive(tempFile, params.ArchivePath); err != nil {
-		helper.LogError(r, fmt.Errorf("SendArchive: finalize cached archive: %v", err))
-		return
+	if cacheEnabled {
+		err := finalizeCachedArchive(tempFile, params.ArchivePath)
+		if err != nil {
+			helper.LogError(r, fmt.Errorf("SendArchive: finalize cached archive: %v", err))
+			return
+		}
 	}
 }
 
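
For orientation, the cache-enabled path above keeps Workhorse's existing write-through pattern: the archive stream is teed into a tempfile beside the cache path while it is being sent to the client, and the tempfile is only promoted to a cache entry after the copy succeeds. A minimal standalone sketch of that pattern follows; streamWithCache and the use of os.Rename are illustrative stand-ins for the handler's prepareArchiveTempfile and finalizeCachedArchive helpers, which finalize via an atomic link(2).

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"
	"strings"
)

// streamWithCache copies src to w. When cacheEnabled is true, the same bytes
// are also written to a tempfile next to cachePath, which is promoted to the
// cache entry once the copy finishes. Illustrative sketch only.
func streamWithCache(w io.Writer, src io.Reader, cachePath string, cacheEnabled bool) error {
	reader := src

	var tempFile *os.File
	if cacheEnabled {
		// Create the tempfile in the same directory as the final cache entry so
		// the finalize step stays on one filesystem.
		f, err := ioutil.TempFile(path.Dir(cachePath), path.Base(cachePath))
		if err != nil {
			return err
		}
		tempFile = f
		defer tempFile.Close()
		// Cleans up the tempfile if we bail out before finalizing; after a
		// successful rename this deferred call is a harmless no-op failure.
		defer os.Remove(tempFile.Name())

		// Everything read from src on the way to the client is also written
		// to the tempfile.
		reader = io.TeeReader(src, tempFile)
	}

	if _, err := io.Copy(w, reader); err != nil {
		return err
	}

	if cacheEnabled {
		// Publish the fully written tempfile under the cache path.
		return os.Rename(tempFile.Name(), cachePath)
	}
	return nil
}

func main() {
	dir, err := ioutil.TempDir("", "archive-cache")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	cachePath := path.Join(dir, "repo-1.tar.gz")

	var out strings.Builder
	if err := streamWithCache(&out, strings.NewReader("archive bytes"), cachePath, true); err != nil {
		panic(err)
	}

	cached, err := ioutil.ReadFile(cachePath)
	if err != nil {
		panic(err)
	}
	fmt.Printf("sent %q, cached %q\n", out.String(), cached)
}
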