Commit 238e32b9 authored by Kirill Smelkov's avatar Kirill Smelkov

.

parent 81b9a523
......@@ -5,10 +5,9 @@ import (
"encoding/json"
"errors"
"io"
// "os"
"log"
"net/http"
"strings"
"log"
"time"
)
......
// Handler for raw blob downloads
//
// Blobs are read via `git cat-file ...` with first querying authentication
// backend about download-access for containing repository. Replies from
// authentication backend are cached for 30 seconds to keep access-to-blobs
// latency to minimum.
// backend about download-access permission for containing repository.
// Replies from authentication backend are cached for 30 seconds to keep
// access-to-blobs latency to a minimum.
package main
import (
"bufio"
"fmt"
"io"
"log"
"fmt"
"bufio"
"time"
"strings"
"bytes"
"regexp"
"net/http"
"net/http/httptest"
"regexp"
"strings"
"time"
)
// Reply from auth backend for "download from repo" authorization request
type AuthReply struct {
w *httptest.ResponseRecorder // output of backend/preAuthorizeHandler
authorizationResponse // parsed auth response from preAuthorizeHandler
// raw reply from auth backend & preAuthorizeHandler().
// recorded so we can replay it from auth cache to each client in full
// if access is rejected.
RawReply *httptest.ResponseRecorder
// decoded auth reply
authorizationResponse
}
// Entry in authorization reply cache
type AuthCacheEntry struct {
AuthReply
Tauth int64 // in seconds XXX do we strictly need this?
Tauth int64 // in seconds XXX needed?
// how many times this entry was hit when querying auth cache during
// the last refresh period.
......@@ -45,8 +48,7 @@ type AuthCacheEntry struct {
var authCache = make(map[string]*AuthCacheEntry)
// Time period for refreshing / removing unused entries in authCache
const authCacheRefresh = 5 * time.Second // XXX -> 30
const authCacheRefresh = 30 * time.Second
// Goroutine to refresh auth cache entry periodically while it is used.
// if the entry is detected to be not used - remove it from cache and stop refreshing.
......@@ -54,15 +56,14 @@ func authRefreshEntry(u *upstream, project string) {
// XXX auth := authCache[project]
// and then use auth without authCache lookup ?
for ;; {
//log.Printf("AUTH refresh sleep ...")
for {
time.Sleep(authCacheRefresh)
// XXX lock
auth, ok := authCache[project]
if !ok { // someone removed the entry from cache - no
log.Printf("AUTH refresh - %v entry removed", project)
break // need to further refresh XXX ok?
break // no need to further refresh
}
log.Printf("AUTH refresh - %v #hit: %v", project, auth.Nhit)
......@@ -90,47 +91,45 @@ func authRefreshEntry(u *upstream, project string) {
}
}
// Ask auth backend about whether download is ok for a project
func askAuthBackend(u *upstream, project string) (AuthReply, error) {
authReply := AuthReply{
w: httptest.NewRecorder(),
RawReply: httptest.NewRecorder(),
}
// Request to auth backend to verify whether download is possible via
// asking as git fetch would do.
// asking as `git fetch` would do.
// XXX privateToken not propagated, etc ...
reqDownloadAccess, err := http.NewRequest("GET", project + ".git/info/refs?service=git-upload-pack", nil)
reqDownloadAccess, err := http.NewRequest("GET",
project+".git/info/refs?service=git-upload-pack", nil)
if err != nil {
fail500(authReply.w, "GET git-upload-pack", err)
fail500(authReply.RawReply, "GET git-upload-pack", err)
return authReply, err
}
// prepare everything and go through preAuthorizeHandler that will send
// prepare everything and go through preAuthorizeHandler() that will send
// request to auth backend and analyze/parse the reply into r.authorizationResponse
r := &gitRequest{
Request: reqDownloadAccess,
u: u,
}
// XXX what if it gets stuck?
preAuthorizeHandler(
func(w http.ResponseWriter, r *gitRequest) {
// if we ever get to this point - auth handler approved
// access and thus it is ok to download
// downloadOk = true
// downloadOk = true XXX
// NOTE we can use authorizationResponse.RepoPath != "" as test for this
}, "") (authReply.w, r)
}, "")(authReply.RawReply, r)
// propagate authorizationResponse back and we are done
authReply.authorizationResponse = r.authorizationResponse
return authReply, nil
}
// Verify whether download access is allowed.
// First we consult authCache; if the information is not there -> ask auth backend
// XXX return -> *AuthReply ?
func verifyDownloadAccess(w http.ResponseWriter, u *upstream, project string) (AuthReply, error) {
// XXX lock authCache
auth, ok := authCache[project]
......@@ -140,7 +139,7 @@ func verifyDownloadAccess(w http.ResponseWriter, u *upstream, project string) (A
project,
time.Since(time.Unix(auth.Tauth, 0)),
auth.Nhit)
return auth.AuthReply, nil // XXX make pointer?
return auth.AuthReply, nil
}
authReply, err := askAuthBackend(u, project)
......@@ -148,7 +147,7 @@ func verifyDownloadAccess(w http.ResponseWriter, u *upstream, project string) (A
return authReply, err
}
// XXX do we need to lock authCache ?
// XXX lock
// store in cache and start cache entry refresher
authCache[project] = &AuthCacheEntry{authReply, time.Now().Unix(), 0}
go authRefreshEntry(u, project)
......@@ -156,8 +155,6 @@ func verifyDownloadAccess(w http.ResponseWriter, u *upstream, project string) (A
return authReply, nil
}
// HTTP handler for `.../raw/<ref>/path`
var projectRe = regexp.MustCompile(`^/[\w\.-]+/[\w\.-]+/`)
......@@ -174,8 +171,8 @@ func handleGetBlobRaw(w http.ResponseWriter, r *gitRequest) {
fail500(w, "refpath != raw/...", nil) // XXX err=nil
return
}
project = project[:len(project)-1]
refpath = refpath[4:]
project = project[:len(project)-1] // strip '.../'
refpath = refpath[4:] // strip 'raw/...'
// Query download access auth for this project
authReply, err := verifyDownloadAccess(w, r.u, project)
......@@ -187,11 +184,11 @@ func handleGetBlobRaw(w http.ResponseWriter, r *gitRequest) {
// access denied - copy auth reply to client in full -
// it carries the HTTP code and other headers / body explaining
// why access was denied.
for k, v := range authReply.w.HeaderMap {
for k, v := range authReply.RawReply.HeaderMap {
w.Header()[k] = v
}
w.WriteHeader(authReply.w.Code)
io.Copy(w, authReply.w.Body)
w.WriteHeader(authReply.RawReply.Code)
io.Copy(w, authReply.RawReply.Body)
return
}
......@@ -210,7 +207,6 @@ Content-Transfer-Encoding: binary
Content-Type: text/plain; charset=utf-8
*/
// Emit content of blob located at <ref>/path (jointly denoted as 'refpath') to output
func emitBlob(w http.ResponseWriter, repopath string, refpath string) {
// Communicate with `git cat-file --batch` trying refs from longest
......@@ -237,14 +233,14 @@ func emitBlob(w http.ResponseWriter, repopath string, refpath string) {
return
}
defer cleanUpProcessGroup(queryCmd)
// XXX also set communication timeout ?
// refpath components as vector
refpathv := strings.Split(refpath, "/")
// scan from right to left and try to change '/' -> ':' and see if it
// creates a correct object name. If it does - we read object content
// which follows.
// TODO handle communication timeout ?
// creates a correct git object name. If it does - we read object
// content which follows.
var sha1, type_ string
var size int64
for i := len(refpathv); i > 0; i-- {
......@@ -266,7 +262,7 @@ func emitBlob(w http.ResponseWriter, repopath string, refpath string) {
log.Printf("<- %s", reply)
// <object> SP missing LF
if bytes.HasSuffix(reply, " missing\n") {
if strings.HasSuffix(reply, " missing\n") {
continue
}
......@@ -274,31 +270,28 @@ func emitBlob(w http.ResponseWriter, repopath string, refpath string) {
_, err = fmt.Sscanf(reply, "%s %s %d\n", &sha1, &type_, &size)
if err != nil {
fail500(w, "git cat-file --batch; reply parse", err)
return;
return
}
if type_ != "blob" {
// XXX -> 404
fail500(w, fmt.Sprintf("git cat-file --batch-check; %v is not blob (is %v)", sha1, type_), nil)
return
log.Printf("git cat-file --batch-check; %v is not blob (is %v)", sha1, type_)
sha1 = "" // so it will return 404
}
// so we found this blob object
break
}
// was the blob found?
// Blob not found -> 404
if sha1 == "" {
// XXX -> 404
fail400(w, "Blob not found", nil)
fail400(w, fmt.Sprintf("Blob for %v not found", refpath), nil)
return
}
log.Printf("blob found, size: %d", size)
//setRawHeaders(...)
w.WriteHeader(200) // XXX too early
log.Printf("111")
// Blob found - start writing response
//setRawHeaders(...) // TODO
w.WriteHeader(200) // Don't bother with HTTP 500 from this point on, just return
// XXX better use queryStdout instead of queryReader, but we could be
// holding some tail bytes in queryReader after chat phase
_, err = io.CopyN(w, queryReader, size)
......@@ -306,14 +299,14 @@ func emitBlob(w http.ResponseWriter, repopath string, refpath string) {
logContext("io.CopyN", err)
return
}
log.Printf("222")
// close git stdin explicitly, so it exits cleanly
err = queryStdin.Close()
if err != nil {
fail500(w, "queryStdin.Close", nil)
logContext("queryStdin.Close", err)
return
}
log.Printf("333")
err = queryCmd.Wait()
if err != nil {
logContext("wait", err)
......
......@@ -7,10 +7,10 @@ package main
import (
"fmt"
"io"
"log"
"net/http"
"path/filepath"
"strings"
"log"
)
func handleGetInfoRefs(w http.ResponseWriter, r *gitRequest) {
......
......@@ -90,6 +90,7 @@ var gitServices = [...]gitService{
func newUpstream(authBackend string, authTransport http.RoundTripper) *upstream {
return &upstream{&http.Client{Transport: authTransport}, authBackend}
// XXX Timeout: ... ?
}
func (u *upstream) ServeHTTP(w http.ResponseWriter, r *http.Request) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment