Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Seaweedfs+cannyls backend #194

Open
wants to merge 7 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -11,12 +11,12 @@ build:
cd integrate && bash buildyig.sh $(BUILDDIR)

build_internal:
go build $(URL)/$(REPO)
go build -tags ceph $(URL)/$(REPO)
bash plugins/build_plugins_internal.sh
go build $(PWD)/tools/admin.go
go build $(PWD)/tools/delete.go
go build $(PWD)/tools/getrediskeys.go
go build $(PWD)/tools/lc.go
go build -tags ceph $(PWD)/tools/admin.go
go build -tags ceph $(PWD)/tools/delete.go
go build -tags ceph $(PWD)/tools/getrediskeys.go
go build -tags ceph $(PWD)/tools/lc.go
cp -f $(PWD)/plugins/*.so $(PWD)/integrate/yigconf/plugins/

pkg:
Expand Down
4 changes: 1 addition & 3 deletions api/generic-handlers.go
Original file line number Diff line number Diff line change
Expand Up @@ -275,9 +275,7 @@ var notimplementedObjectResourceNames = map[string]bool{

func GetBucketAndObjectInfoFromRequest(r *http.Request) (bucketName string, objectName string) {
splits := strings.SplitN(r.URL.Path[1:], "/", 2)
v := strings.Split(r.Host, ":")
hostWithOutPort := v[0]
ok, bucketName := helper.HasBucketInDomain(hostWithOutPort, ".", helper.CONFIG.S3Domain)
ok, bucketName := helper.HasBucketInDomain(r.Host, ".", helper.CONFIG.S3Domain)
if ok {
if len(splits) == 1 {
objectName = splits[0]
Expand Down
13 changes: 13 additions & 0 deletions api/limit_ceph.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
// +build ceph

package api

// S3 request limits for the Ceph backend, taken from
// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
const (
	// a single PUT request may carry at most 5 GiB of object data
	maxObjectSize = 5 * 1024 * 1024 * 1024
	// every part of a multipart upload except the last must be at least 5 MiB
	minPartSize = 5 * 1024 * 1024
	// part numbers for multipart uploads range from 1 to 10000 inclusive
	maxPartID = 10000
)
13 changes: 13 additions & 0 deletions api/limit_seaweedfs.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
// +build seaweedfs

package api

// S3 request limits for the SeaweedFS+cannyls backend. These deviate from
// the AWS defaults because cannyls caps the size of a single stored object.
const (
	// a single PUT request may carry at most 30 MB, a limit imposed by cannyls
	maxObjectSize = 30 * 1024 * 1024
	// every part of a multipart upload except the last must be at least 5 MiB
	minPartSize = 5 * 1024 * 1024
	// part numbers range from 1 to 100000 inclusive; raised from the AWS
	// default of 10000 so the maximum total object size stays near 3 TB
	// despite the reduced maxObjectSize
	maxPartID = 100000
)
2 changes: 1 addition & 1 deletion api/object-handlers.go
Original file line number Diff line number Diff line change
Expand Up @@ -236,7 +236,7 @@ func (api ObjectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req
n, err := w.Write(p)
if n > 0 {
/*
If the whole write or only part of write is successfull,
If the whole write or only part of write is successful,
n should be positive, so record this
*/
w.(*ResponseRecorder).size += int64(n)
Expand Down
10 changes: 0 additions & 10 deletions api/utils.go
Original file line number Diff line number Diff line change
Expand Up @@ -40,16 +40,6 @@ func checkValidMD5(md5 string) ([]byte, error) {
return base64.StdEncoding.DecodeString(strings.TrimSpace(md5))
}

/// http://docs.aws.amazon.com/AmazonS3/latest/dev/UploadingObjects.html
const (
// maximum object size per PUT request is 5GiB
maxObjectSize = 1024 * 1024 * 1024 * 5
// minimum Part size for multipart upload is 5MB
minPartSize = 1024 * 1024 * 5
// maximum Part ID for multipart upload is 10000 (Acceptable values range from 1 to 10000 inclusive)
maxPartID = 10000
)

// isMaxObjectSize - verify if max object size
func isMaxObjectSize(size int64) bool {
return size > maxObjectSize
Expand Down
42 changes: 42 additions & 0 deletions backend/backend.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
package backend

import (
"github.com/journeymidnight/yig/helper"
"github.com/journeymidnight/yig/log"
"github.com/journeymidnight/yig/meta/types"
"io"
)

// Usage reports coarse capacity statistics for a storage cluster.
type Usage struct {
	// UsedSpacePercent is how full the cluster is, in the range 0 ~ 100.
	UsedSpacePercent int // range 0 ~ 100
}

// Cluster is the abstraction over one storage cluster backend
// (e.g. Ceph or SeaweedFS). Object data is addressed by a
// (poolName, objectName) pair; the backend assigns objectName on write.
type Cluster interface {
	// ID returns the cluster's unique identifier.
	ID() string
	// GetUsage returns the cluster's usage statistics.
	GetUsage() (Usage, error)
	// Put writes a new object into the given pool, returning the
	// backend-assigned object name and the number of bytes written.
	Put(poolName string, object io.Reader) (objectName string,
		bytesWritten uint64, err error)
	// Append appends a chunk to an existing object at the given offset;
	// an empty existName means "create a new object". Returns the object
	// name (new or existing) and the number of bytes written.
	Append(poolName, existName string, objectChunk io.Reader,
		offset int64) (objectName string, bytesWritten uint64, err error)
	// GetReader returns a ReadCloser over the object's data starting at
	// offset; length == 0 means read through to the end of the object.
	// The caller is responsible for closing the returned reader.
	GetReader(poolName, objectName string,
		offset int64, length uint64) (io.ReadCloser, error)
	// Remove deletes the object from the pool.
	Remove(poolName, objectName string) error
}

// Plugin is the entry point a backend plugin must implement so yig can
// discover and drive its storage clusters.
type Plugin interface {
	// Initialize builds the handlers for every cluster this backend
	// manages, keyed by cluster ID. It panics on errors rather than
	// returning them (per the comment below, callers treat a failed
	// backend initialization as fatal).
	//
	// initialize backend cluster handlers,
	// returns cluster ID -> Cluster, panic on errors
	Initialize(logger *log.Logger, config helper.Config) map[string]Cluster
	// PickCluster chooses which cluster (and pool within it) should
	// receive an object of the given size, storage class, and type.
	// weights presumably maps cluster ID -> scheduling weight used in the
	// selection — confirm against implementations.
	// XXX: this is ugly and subject to change
	PickCluster(clusters map[string]Cluster, weights map[string]int,
		size uint64, class types.StorageClass,
		objectType types.ObjectType) (cluster Cluster, pool string, err error)
}
Loading