-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathweb_asset_handler.go
241 lines (215 loc) · 6.2 KB
/
web_asset_handler.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
package pgs
import (
"fmt"
"io"
"log/slog"
"net/http"
"net/url"
"path/filepath"
"regexp"
"strconv"
"strings"
"net/http/httputil"
_ "net/http/pprof"
"github.com/picosh/pgs/db"
"github.com/picosh/pgs/storage"
sst "github.com/picosh/pobj/storage"
)
// ApiAssetHandler serves a single static asset for a pgs project from
// object storage, applying the project's `_redirects` and `_headers`
// special files along the way (see ServeHTTP).
type ApiAssetHandler struct {
*WebRouter
// Logger is the structured logger used for all request-scoped logging.
Logger *slog.Logger
// UserID and Username identify the project owner.
// NOTE(review): neither is read in the visible code — presumably used
// by the embedded *WebRouter or callers; confirm before removing.
UserID string
Username string
// ProjectDir is the bucket-relative root of the project; special files
// (`_redirects`, `_headers`) are looked up under this directory.
ProjectDir string
// Feature gates limits for this user, e.g. GetSpecialFileMax() caps
// how large `_redirects`/`_headers` may be.
Feature db.Feature
// Subdomain is emitted as the `surrogate-key` header so cached
// responses can be invalidated per-site.
Subdomain string
// Filepath is the request path to resolve against the project routes.
Filepath string
// Bucket is the object-storage bucket holding the project's files.
Bucket sst.Bucket
// ImgProcessOpts carries optional image transformation parameters
// passed through to Storage.ServeObject.
ImgProcessOpts *storage.ImgProcessOpts
}
// hasProtocol reports whether rawURL is an absolute URL with an explicit
// http or https scheme (as opposed to a site-relative path).
// The parameter is named rawURL rather than url so it does not shadow the
// imported net/url package.
func hasProtocol(rawURL string) bool {
	return strings.HasPrefix(rawURL, "http://") || strings.HasPrefix(rawURL, "https://")
}
// ServeHTTP resolves the requested file against the project's routing rules
// (including the optional `_redirects` special file), then streams the
// matched object from storage — applying `_headers` rules, cache metadata
// (etag, last-modified, surrogate-key), and conditional-request handling.
// External redirect targets are reverse-proxied instead of served.
func (h *ApiAssetHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	logger := h.Logger

	// Load and parse the optional `_redirects` file for this project.
	var redirects []*RedirectRule
	redirectFp, redirectInfo, err := h.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_redirects"))
	if err == nil {
		defer redirectFp.Close()
		if redirectInfo != nil && redirectInfo.Size > h.Feature.GetSpecialFileMax() {
			errMsg := fmt.Sprintf("_redirects file is too large (%d > %d)", redirectInfo.Size, h.Feature.GetSpecialFileMax())
			logger.Error(errMsg)
			http.Error(w, errMsg, http.StatusInternalServerError)
			return
		}
		buf := new(strings.Builder)
		// Cap the read so a mis-reported object size cannot exhaust memory.
		lr := io.LimitReader(redirectFp, h.Feature.GetSpecialFileMax())
		_, err := io.Copy(buf, lr)
		if err != nil {
			logger.Error("io copy", "err", err.Error())
			http.Error(w, "cannot read _redirects file", http.StatusInternalServerError)
			return
		}

		redirects, err = parseRedirectText(buf.String())
		if err != nil {
			// A malformed `_redirects` file is non-fatal: serve without rules.
			logger.Error("could not parse redirect text", "err", err.Error())
		}
	}

	routes := calcRoutes(h.ProjectDir, h.Filepath, redirects)

	var contents io.ReadCloser
	assetFilepath := ""
	var info *sst.ObjectInfo
	status := http.StatusOK
	attempts := []string{}
	// Try each candidate route in order until one resolves to an object,
	// a redirect, or an external proxy target.
	for _, fp := range routes {
		destUrl, err := url.Parse(fp.Filepath)
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		// Preserve the client's query string on redirects/proxies.
		destUrl.RawQuery = r.URL.RawQuery

		if checkIsRedirect(fp.Status) {
			// hack: check to see if there's an index file in the requested directory
			// before redirecting, this saves a hop that will just end up a 404
			if !hasProtocol(fp.Filepath) && strings.HasSuffix(fp.Filepath, "/") {
				next := filepath.Join(h.ProjectDir, fp.Filepath, "index.html")
				_, _, err := h.Storage.GetObject(h.Bucket, next)
				if err != nil {
					continue
				}
			}
			logger.Info(
				"redirecting request",
				"destination", destUrl.String(),
				"status", fp.Status,
			)
			http.Redirect(w, r, destUrl.String(), fp.Status)
			return
		} else if hasProtocol(fp.Filepath) {
			/* if !h.HasPicoPlus {
				msg := "must be pico+ user to fetch content from external source"
				logger.Error(
					msg,
					"destination", destUrl.String(),
					"status", fp.Status,
				)
				http.Error(w, msg, http.StatusUnauthorized)
				return
			} */

			logger.Info(
				"fetching content from external service",
				"destination", destUrl.String(),
				"status", fp.Status,
			)

			// Proxy the request to the external origin, rewriting Host and
			// URL so the upstream sees the redirect destination.
			proxy := httputil.NewSingleHostReverseProxy(destUrl)
			oldDirector := proxy.Director
			proxy.Director = func(r *http.Request) {
				oldDirector(r)
				r.Host = destUrl.Host
				r.URL = destUrl
			}
			// Disable caching
			proxy.ModifyResponse = func(r *http.Response) error {
				r.Header.Set("cache-control", "no-cache")
				return nil
			}
			proxy.ServeHTTP(w, r)
			return
		}

		attempts = append(attempts, fp.Filepath)
		logger = logger.With("filename", fp.Filepath)
		var c io.ReadCloser
		c, info, err = h.Storage.ServeObject(
			h.Bucket,
			fp.Filepath,
			h.ImgProcessOpts,
		)
		if err == nil {
			contents = c
			assetFilepath = fp.Filepath
			status = fp.Status
			break
		}
	}

	if assetFilepath == "" {
		logger.Info(
			"asset not found in bucket",
			"routes", strings.Join(attempts, ", "),
			"status", http.StatusNotFound,
		)
		http.Error(w, "404 not found", http.StatusNotFound)
		return
	}
	defer contents.Close()

	// Load and parse the optional `_headers` file for this project.
	var headers []*HeaderRule
	headersFp, headersInfo, err := h.Storage.GetObject(h.Bucket, filepath.Join(h.ProjectDir, "_headers"))
	if err == nil {
		defer headersFp.Close()
		if headersInfo != nil && headersInfo.Size > h.Feature.GetSpecialFileMax() {
			errMsg := fmt.Sprintf("_headers file is too large (%d > %d)", headersInfo.Size, h.Feature.GetSpecialFileMax())
			logger.Error(errMsg)
			http.Error(w, errMsg, http.StatusInternalServerError)
			return
		}
		buf := new(strings.Builder)
		lr := io.LimitReader(headersFp, h.Feature.GetSpecialFileMax())
		_, err := io.Copy(buf, lr)
		if err != nil {
			logger.Error("io copy", "err", err.Error())
			http.Error(w, "cannot read _headers file", http.StatusInternalServerError)
			return
		}

		headers, err = parseHeaderText(buf.String())
		if err != nil {
			// A malformed `_headers` file is non-fatal: serve without rules.
			logger.Error("could not parse header text", "err", err.Error())
		}
	}

	// The last rule whose path pattern matches the served file wins.
	userHeaders := []*HeaderLine{}
	for _, headerRule := range headers {
		// BUGFIX: headerRule.Path comes from a user-uploaded `_headers`
		// file; MustCompile would panic the handler on an invalid pattern.
		// Skip bad rules instead of crashing.
		rr, err := regexp.Compile(headerRule.Path)
		if err != nil {
			logger.Error("invalid header rule path regex", "path", headerRule.Path, "err", err.Error())
			continue
		}
		match := rr.FindStringSubmatch(assetFilepath)
		if len(match) > 0 {
			userHeaders = headerRule.Headers
		}
	}

	// Cache/validator metadata from object storage, when available.
	contentType := ""
	if info != nil {
		contentType = info.Metadata.Get("content-type")
		if info.Size != 0 {
			w.Header().Add("content-length", strconv.Itoa(int(info.Size)))
		}
		if info.ETag != "" {
			// Minio SDK trims off the mandatory quotes (RFC 7232 § 2.3)
			w.Header().Add("etag", fmt.Sprintf("\"%s\"", info.ETag))
		}
		if !info.LastModified.IsZero() {
			w.Header().Add("last-modified", info.LastModified.UTC().Format(http.TimeFormat))
		}
	}

	// User-supplied headers are applied after storage metadata so a
	// `_headers` rule can set content-type before the fallback below.
	for _, hdr := range userHeaders {
		w.Header().Add(hdr.Name, hdr.Value)
	}
	if w.Header().Get("content-type") == "" {
		w.Header().Set("content-type", contentType)
	}

	// Allows us to invalidate the cache when files are modified
	w.Header().Set("surrogate-key", h.Subdomain)

	finContentType := w.Header().Get("content-type")

	logger.Info(
		"serving asset",
		"asset", assetFilepath,
		"status", status,
		"contentType", finContentType,
	)

	// BUGFIX: info may be nil on a successful ServeObject (the metadata
	// block above already guards for that), so dereferencing it here
	// unconditionally could panic. Only evaluate conditional requests
	// when we actually have a last-modified time.
	if info != nil {
		done, _ := checkPreconditions(w, r, info.LastModified.UTC())
		if done {
			// A conditional request was detected, status and headers are set,
			// no body required (either 412 or 304)
			return
		}
	}

	w.WriteHeader(status)
	_, err = io.Copy(w, contents)
	if err != nil {
		logger.Error("io copy", "err", err.Error())
	}
}