Commit 14d70b3b authored by Jacob Vosmaer

Merge remote-tracking branch 'origin/master' into refactor-upstream

parents e4569081 20eea011
# gitlab-workhorse

-gitlab-workhorse was designed to unload Git HTTP traffic from
-the GitLab Rails app (Unicorn) to a separate daemon. It also serves
-'git archive' downloads for GitLab. All authentication and
-authorization logic is still handled by the GitLab Rails app.
-
-Architecture: Git client -> NGINX -> gitlab-workhorse (makes
-auth request to GitLab Rails app) -> git-upload-pack
+Gitlab-workhorse is a smart reverse proxy for GitLab. It handles
+"large" HTTP requests such as file downloads, file uploads, Git
+push/pull and Git archive downloads.

## Usage
@@ -18,6 +15,10 @@ Options:
       Authentication/authorization backend (default "http://localhost:8080")
   -authSocket string
       Optional: Unix domain socket to dial authBackend at
+  -developmentMode
+      Allow to serve assets from Rails app
+  -documentRoot string
+      Path to static files content (default "public")
   -listenAddr string
       Listen address for HTTP server (default "localhost:8181")
   -listenNetwork string
@@ -26,19 +27,17 @@ Options:
       Umask for Unix socket, default: 022 (default 18)
   -pprofListenAddr string
       pprof listening address, e.g. 'localhost:6060'
+  -proxyHeadersTimeout duration
+      How long to wait for response headers when proxying the request (default 1m0s)
   -version
       Print version and exit
```
-gitlab-workhorse allows Git HTTP clients to push and pull to
-and from Git repositories. Each incoming request is first replayed
-(with an empty request body) to an external authentication/authorization
-HTTP server: the 'auth backend'. The auth backend is expected to
-be a GitLab Unicorn process. The 'auth response' is a JSON message
-which tells gitlab-workhorse the path of the Git repository
-to read from/write to.
+The 'auth backend' refers to the GitLab Rails application. The name is
+a holdover from when gitlab-workhorse only handled Git push/pull over
+HTTP.
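The removed paragraph describes the pre-authorization round trip: workhorse replays the incoming request with an empty body against the auth backend and reads back a JSON reply. A minimal sketch of that flow, assuming an illustrative `preAuthorize` helper and `authResponse` struct (these names are not workhorse's actual `api` package):

```go
package authdemo

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// authResponse is an illustrative stand-in for the JSON 'auth response';
// the real field set is defined by the GitLab Rails application.
type authResponse struct {
	RepoPath string `json:"RepoPath"`
}

// preAuthorize replays the incoming request (with an empty body) to the
// auth backend and decodes the JSON reply.
func preAuthorize(authBackend string, r *http.Request) (*authResponse, error) {
	authReq, err := http.NewRequest(r.Method, authBackend+r.URL.RequestURI(), nil)
	if err != nil {
		return nil, err
	}
	authReq.Header = r.Header // forward credentials such as cookies and tokens

	resp, err := http.DefaultClient.Do(authReq)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("auth backend: %s", resp.Status)
	}

	auth := &authResponse{}
	if err := json.NewDecoder(resp.Body).Decode(auth); err != nil {
		return nil, err
	}
	return auth, nil
}
```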
-gitlab-workhorse can listen on either a TCP or a Unix domain socket. It
+Gitlab-workhorse can listen on either a TCP or a Unix domain socket. It
can also open a second TCP listening socket with the Go
[net/http/pprof profiler server](http://golang.org/pkg/net/http/pprof/).
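As a rough illustration of that second pprof listener (generic `net/http/pprof` usage, not workhorse's actual startup code; the address stands in for whatever `-pprofListenAddr` is set to):

```go
package main

import (
	"log"
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/ handlers on the default mux
)

func main() {
	// Second listening socket for the profiler, e.g. -pprofListenAddr 'localhost:6060'.
	go func() {
		log.Println(http.ListenAndServe("localhost:6060", nil))
	}()

	// The main listener (TCP or Unix domain socket) would be started here.
	select {}
}
```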
......
@@ -16,6 +16,7 @@ import (
	"os/exec"
	"path"
	"path/filepath"
+	"syscall"
	"time"
)
@@ -84,6 +85,7 @@ func handleGetArchive(w http.ResponseWriter, r *http.Request, a *api.Response) {
		stdout = archiveStdout
	} else {
		compressCmd.Stdin = archiveStdout
+		compressCmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
		stdout, err = compressCmd.StdoutPipe()
		if err != nil {
@@ -96,7 +98,7 @@ func handleGetArchive(w http.ResponseWriter, r *http.Request, a *api.Response) {
			helper.Fail500(w, fmt.Errorf("handleGetArchive: start %v: %v", compressCmd.Args, err))
			return
		}
-		defer compressCmd.Wait()
+		defer cleanUpProcessGroup(compressCmd)
		archiveStdout.Close()
	}
......
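The `cleanUpProcessGroup` helper itself is not part of this hunk. Assuming it mirrors the `Setpgid: true` setting added above, a minimal sketch could look like this (an assumption, not the actual workhorse implementation):

```go
package archive

import (
	"os/exec"
	"syscall"
)

// cleanUpProcessGroup signals the whole process group of a command that was
// started with SysProcAttr{Setpgid: true}, then reaps the command.
func cleanUpProcessGroup(cmd *exec.Cmd) {
	if cmd == nil {
		return
	}
	if p := cmd.Process; p != nil && p.Pid > 0 {
		// A negative pid targets the entire process group, so any children
		// spawned by the compressor are terminated along with it.
		syscall.Kill(-p.Pid, syscall.SIGTERM)
	}
	// Reap the process to avoid leaving a zombie behind.
	cmd.Wait()
}
```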
@@ -60,7 +60,10 @@ func (s *errorPageResponseWriter) Flush() {
	s.WriteHeader(http.StatusOK)
}

-func (st *Static) ErrorPages(handler http.Handler) http.Handler {
+func (st *Static) ErrorPages(enabled bool, handler http.Handler) http.Handler {
+	if !enabled {
+		return handler
+	}
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		rw := errorPageResponseWriter{
			rw: w,
......
@@ -27,7 +27,7 @@ func TestIfErrorPageIsPresented(t *testing.T) {
		fmt.Fprint(w, "Not Found")
	})
	st := &Static{dir}
-	st.ErrorPages(h).ServeHTTP(w, nil)
+	st.ErrorPages(true, h).ServeHTTP(w, nil)
	w.Flush()

	helper.AssertResponseCode(t, w, 404)
@@ -48,9 +48,32 @@ func TestIfErrorPassedIfNoErrorPageIsFound(t *testing.T) {
		fmt.Fprint(w, errorResponse)
	})
	st := &Static{dir}
-	st.ErrorPages(h).ServeHTTP(w, nil)
+	st.ErrorPages(true, h).ServeHTTP(w, nil)
	w.Flush()

	helper.AssertResponseCode(t, w, 404)
	helper.AssertResponseBody(t, w, errorResponse)
}
+
+func TestIfErrorPageIsIgnoredInDevelopment(t *testing.T) {
+	dir, err := ioutil.TempDir("", "error_page")
+	if err != nil {
+		t.Fatal(err)
+	}
+	defer os.RemoveAll(dir)
+
+	errorPage := "ERROR"
+	ioutil.WriteFile(filepath.Join(dir, "500.html"), []byte(errorPage), 0600)
+
+	w := httptest.NewRecorder()
+	serverError := "Interesting Server Error"
+	h := http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
+		w.WriteHeader(500)
+		fmt.Fprint(w, serverError)
+	})
+	st := &Static{dir}
+	st.ErrorPages(false, h).ServeHTTP(w, nil)
+	w.Flush()
+
+	helper.AssertResponseCode(t, w, 500)
+	helper.AssertResponseBody(t, w, serverError)
+}
@@ -88,7 +88,7 @@ func (u *Upstream) configureRoutes() {
		route{"", nil,
			static.ServeExisting(u.URLPrefix(), staticpages.CacheDisabled,
				static.DeployPage(
-					static.ErrorPages(
+					static.ErrorPages(u.DevelopmentMode,
						proxy,
					),
				),
......
@@ -81,7 +81,7 @@ func (u *Upstream) ServeHTTP(ow http.ResponseWriter, r *http.Request) {
	}

	// Check URL Root
-	URIPath := urlprefix.CleanURIPath(r.URL.Path)
+	URIPath := urlprefix.CleanURIPath(r.URL.EscapedPath())
	prefix := u.URLPrefix()
	if !prefix.Match(URIPath) {
		httpError(&w, r, fmt.Sprintf("Not found %q", URIPath), http.StatusNotFound)
......
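The switch to `EscapedPath()` matters for project paths that contain an encoded slash: `URL.Path` holds the decoded form, so `foo%2Fbar` would be split into two path segments before the route match. A small standalone check (plain `net/url`, not workhorse code) shows the difference:

```go
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("/api/v3/projects/foo%2Fbar/repository/archive.zip")
	if err != nil {
		panic(err)
	}

	// Path is the decoded form: the %2F becomes a real slash.
	fmt.Println(u.Path) // /api/v3/projects/foo/bar/repository/archive.zip

	// EscapedPath preserves the original encoding, keeping foo%2Fbar intact.
	fmt.Println(u.EscapedPath()) // /api/v3/projects/foo%2Fbar/repository/archive.zip
}
```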
@@ -15,6 +15,7 @@ import (
	"os/exec"
	"path"
	"regexp"
+	"strings"
	"testing"
	"time"
)
@@ -199,6 +200,29 @@ func TestAllowedApiDownloadZip(t *testing.T) {
	runOrFail(t, extractCmd)
}
+
+func TestAllowedApiDownloadZipWithSlash(t *testing.T) {
+	prepareDownloadDir(t)
+
+	// Prepare test server and backend
+	archiveName := "foobar.zip"
+	ts := testAuthServer(nil, 200, archiveOkBody(t, archiveName))
+	defer ts.Close()
+	ws := startWorkhorseServer(ts.URL)
+	defer ws.Close()
+
+	// Use foo%2Fbar instead of a numeric ID
+	downloadCmd := exec.Command("curl", "-J", "-O", fmt.Sprintf("%s/api/v3/projects/foo%%2Fbar/repository/archive.zip", ws.URL))
+	if !strings.Contains(downloadCmd.Args[3], `projects/foo%2Fbar/repository`) {
+		t.Fatalf("Cannot find percent-2F: %v", downloadCmd.Args)
+	}
+	downloadCmd.Dir = scratchDir
+	runOrFail(t, downloadCmd)
+
+	extractCmd := exec.Command("unzip", archiveName)
+	extractCmd.Dir = scratchDir
+	runOrFail(t, extractCmd)
+}
func TestDownloadCacheHit(t *testing.T) {
	prepareDownloadDir(t)
......