vendor: github.com/t3rm1n4l for backend/mega

Nick Craig-Wood 2018-04-13 11:51:28 +01:00
parent 9948b39dba
commit f50b85278a
11 changed files with 2917 additions and 1 deletion

8
Gopkg.lock generated

@@ -323,6 +323,12 @@
  ]
  revision = "380174f817a09abe5982a82f94ad50938a8df65d"

[[projects]]
  branch = "master"
  name = "github.com/t3rm1n4l/go-mega"
  packages = ["."]
  revision = "4e68b16e97ffc3b77abacbf727817a4d48fb0b66"

[[projects]]
  branch = "master"
  name = "github.com/xanzy/ssh-agent"

@@ -475,6 +481,6 @@
[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "50d82f1173802259032be4dddb962f1b7ed8eebdbc24c73febcde47d8deecb30"
  inputs-digest = "09939c0d5f32998497c8304c84dd5c397c88816d235441d87f5306ae5db43b8a"
  solver-name = "gps-cdcl"
  solver-version = 1

Gopkg.toml

@@ -148,3 +148,7 @@
[[constraint]]
  branch = "master"
  name = "github.com/sevlyar/go-daemon"

[[constraint]]
  branch = "master"
  name = "github.com/t3rm1n4l/go-mega"

29
vendor/github.com/t3rm1n4l/go-mega/.travis.yml generated vendored Normal file

@@ -0,0 +1,29 @@
language: go
sudo: false
osx_image: xcode7.3
os:
  - linux
go:
  - 1.6.4
  - 1.7.6
  - 1.8.7
  - 1.9.5
  - "1.10.1"
  - tip
install:
  - make build_dep
script:
  - make check
  - make test
matrix:
  allow_failures:
    - go: tip
  include:
    - os: osx
      go: "1.10.1"
env:
  global:
    - secure: RzsF80V1i69FVJwKSF8WrFzk5bRUKtPxRkhjiLOO0b1usFg0EIY6XFp3s/VTR6oT91LRXml3Bp7wHHrkPvGnHyUyuxj6loj3gIrsX8cZHUtjyQX/Szfi9MOJpbdJvfCcHByEh9YGldAz//9zvEo5oGuI29Luur3cv+BJNJElmHg=
    - secure: Eu3kWJbxpKyioitPQo75gI3gL/HKEHVMdp6YLxxcmlrbG2xyXdlFhTB2YkkmnC8jNvf7XJWdtYnhlWM9MrNY1fUiRyGSAmpSlzzCa9XQ9lCv0hUH57+D3PAcH6gdgKn6q1iOk26CxOCKAHVaj5xdDMIyCc4mD+sLyTDQhBIHABc=
notifications:
  email: false

18
vendor/github.com/t3rm1n4l/go-mega/Makefile generated vendored Normal file

@@ -0,0 +1,18 @@
build:
	go build

test:
	go test -cpu 4 -v -race

# Get the build dependencies
build_dep:
	go get -u github.com/kisielk/errcheck
	go get -u golang.org/x/tools/cmd/goimports
	go get -u github.com/golang/lint/golint

# Do source code quality checks
check:
	go vet
	errcheck
	goimports -d . | grep . ; test $$? -eq 1
	-#golint

65
vendor/github.com/t3rm1n4l/go-mega/README.md generated vendored Normal file

@@ -0,0 +1,65 @@
go-mega
=======

A client library in Go for the mega.co.nz storage service.

An implementation of a command-line utility can be found at [https://github.com/t3rm1n4l/megacmd](https://github.com/t3rm1n4l/megacmd)

[![Build Status](https://secure.travis-ci.org/t3rm1n4l/go-mega.png?branch=master)](http://travis-ci.org/t3rm1n4l/go-mega)

### What can I do with this library?

This is an API client library for the MEGA storage service. Currently, the library supports the following basic APIs and operations:

 - User login
 - Fetch filesystem tree
 - Upload file
 - Download file
 - Create directory
 - Move file or directory
 - Rename file or directory
 - Delete file or directory
 - Parallel split download and upload
 - Filesystem events auto sync
 - Unit tests

### API methods

Please find the full documentation at [http://godoc.org/github.com/t3rm1n4l/go-mega](http://godoc.org/github.com/t3rm1n4l/go-mega)
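The sketch below is not part of the upstream README; it simply strings together the calls exercised by the package tests (`New`, `Login`, `UploadFile`, `DownloadFile`) as a rough illustration. The `FS.GetRoot` accessor for the account root node is assumed here, and the file paths are placeholders; check the godoc link above for the exact API.

```go
package main

import (
	"log"
	"os"

	mega "github.com/t3rm1n4l/go-mega"
)

func main() {
	m := mega.New()
	if err := m.Login(os.Getenv("MEGA_USER"), os.Getenv("MEGA_PASSWD")); err != nil {
		log.Fatalf("login failed: %v", err)
	}

	// Upload a local file into the account root (keeping its base name),
	// then download it back.
	root := m.FS.GetRoot() // assumed accessor for the filesystem root node
	node, err := m.UploadFile("/tmp/example.bin", root, "", nil)
	if err != nil {
		log.Fatalf("upload failed: %v", err)
	}
	if err := m.DownloadFile(node, "/tmp/example-copy.bin", nil); err != nil {
		log.Fatalf("download failed: %v", err)
	}
}
```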
### Testing

    export MEGA_USER=<user_email>
    export MEGA_PASSWD=<user_passwd>

    $ make test
    go test -v
    === RUN TestLogin
    --- PASS: TestLogin (1.90 seconds)
    === RUN TestGetUser
    --- PASS: TestGetUser (1.65 seconds)
    === RUN TestUploadDownload
    --- PASS: TestUploadDownload (12.28 seconds)
    === RUN TestMove
    --- PASS: TestMove (9.31 seconds)
    === RUN TestRename
    --- PASS: TestRename (9.16 seconds)
    === RUN TestDelete
    --- PASS: TestDelete (3.87 seconds)
    === RUN TestCreateDir
    --- PASS: TestCreateDir (2.34 seconds)
    === RUN TestConfig
    --- PASS: TestConfig (0.01 seconds)
    === RUN TestPathLookup
    --- PASS: TestPathLookup (8.54 seconds)
    === RUN TestEventNotify
    --- PASS: TestEventNotify (19.65 seconds)
    PASS
    ok      github.com/t3rm1n4l/go-mega     68.745s

### TODO

 - Implement APIs for public download url generation
 - Implement download from public url
 - Add shared user content management APIs
 - Add contact list management APIs

### License

MIT

85
vendor/github.com/t3rm1n4l/go-mega/errors.go generated vendored Normal file

@@ -0,0 +1,85 @@
package mega

import (
	"errors"
	"fmt"
)

var (
	// General errors
	EINTERNAL  = errors.New("Internal error occurred")
	EARGS      = errors.New("Invalid arguments")
	EAGAIN     = errors.New("Try again")
	ERATELIMIT = errors.New("Rate limit reached")
	EBADRESP   = errors.New("Bad response from server")

	// Upload errors
	EFAILED  = errors.New("The upload failed. Please restart it from scratch")
	ETOOMANY = errors.New("Too many concurrent IP addresses are accessing this upload target URL")
	ERANGE   = errors.New("The upload file packet is out of range or not starting and ending on a chunk boundary")
	EEXPIRED = errors.New("The upload target URL you are trying to access has expired. Please request a fresh one")

	// Filesystem/Account errors
	ENOENT       = errors.New("Object (typically, node or user) not found")
	ECIRCULAR    = errors.New("Circular linkage attempted")
	EACCESS      = errors.New("Access violation")
	EEXIST       = errors.New("Trying to create an object that already exists")
	EINCOMPLETE  = errors.New("Trying to access an incomplete resource")
	EKEY         = errors.New("A decryption operation failed")
	ESID         = errors.New("Invalid or expired user session, please relogin")
	EBLOCKED     = errors.New("User blocked")
	EOVERQUOTA   = errors.New("Request over quota")
	ETEMPUNAVAIL = errors.New("Resource temporarily not available, please try again later")
	EMACMISMATCH = errors.New("MAC verification failed")
	EBADATTR     = errors.New("Bad node attribute")

	// Config errors
	EWORKER_LIMIT_EXCEEDED = errors.New("Maximum worker limit exceeded")
)

// ErrorMsg is a numeric error code returned by the MEGA API.
type ErrorMsg int

// parseError maps a numeric API error code to one of the errors above.
func parseError(errno ErrorMsg) error {
	switch {
	case errno == 0:
		return nil
	case errno == -1:
		return EINTERNAL
	case errno == -2:
		return EARGS
	case errno == -3:
		return EAGAIN
	case errno == -4:
		return ERATELIMIT
	case errno == -5:
		return EFAILED
	case errno == -6:
		return ETOOMANY
	case errno == -7:
		return ERANGE
	case errno == -8:
		return EEXPIRED
	case errno == -9:
		return ENOENT
	case errno == -10:
		return ECIRCULAR
	case errno == -11:
		return EACCESS
	case errno == -12:
		return EEXIST
	case errno == -13:
		return EINCOMPLETE
	case errno == -14:
		return EKEY
	case errno == -15:
		return ESID
	case errno == -16:
		return EBLOCKED
	case errno == -17:
		return EOVERQUOTA
	case errno == -18:
		return ETEMPUNAVAIL
	}
	return fmt.Errorf("Unknown mega error %d", errno)
}

1724
vendor/github.com/t3rm1n4l/go-mega/mega.go generated vendored Normal file

File diff suppressed because it is too large.

351
vendor/github.com/t3rm1n4l/go-mega/mega_test.go generated vendored Normal file

@@ -0,0 +1,351 @@
package mega

import (
	"crypto/md5"
	"crypto/rand"
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"testing"
	"time"
)

var USER string = os.Getenv("MEGA_USER")
var PASSWORD string = os.Getenv("MEGA_PASSWD")

// retry runs fn until it succeeds, using what to log and retrying on
// EAGAIN. It uses exponential backoff.
func retry(t *testing.T, what string, fn func() error) {
	const maxTries = 10
	var err error
	sleep := 100 * time.Millisecond
	for i := 1; i <= maxTries; i++ {
		err = fn()
		if err == nil {
			return
		}
		if err != EAGAIN {
			break
		}
		t.Logf("%s failed %d/%d - retrying after %v sleep", what, i, maxTries, sleep)
		time.Sleep(sleep)
		sleep *= 2
	}
	t.Fatalf("%s failed: %v", what, err)
}

func initSession(t *testing.T) *Mega {
	m := New()
	// m.SetDebugger(log.Printf)
	retry(t, "Login", func() error {
		return m.Login(USER, PASSWORD)
	})
	return m
}

// createFile creates a temporary file of a given size along with its MD5SUM.
func createFile(t *testing.T, size int64) (string, string) {
	b := make([]byte, size)
	_, err := rand.Read(b)
	if err != nil {
		t.Fatalf("Error reading rand: %v", err)
	}
	file, err := ioutil.TempFile("/tmp/", "gomega-")
	if err != nil {
		t.Fatalf("Error creating temp file: %v", err)
	}
	_, err = file.Write(b)
	if err != nil {
		t.Fatalf("Error writing temp file: %v", err)
	}
	h := md5.New()
	_, err = h.Write(b)
	if err != nil {
		t.Fatalf("Error on Write while writing temp file: %v", err)
	}
	return file.Name(), fmt.Sprintf("%x", h.Sum(nil))
}

// uploadFile uploads a temporary file of a given size returning the
// node, name and its MD5SUM.
func uploadFile(t *testing.T, session *Mega, size int64, parent *Node) (node *Node, name string, md5sum string) {
	name, md5sum = createFile(t, size)
	defer func() {
		_ = os.Remove(name)
	}()
	var err error
	retry(t, fmt.Sprintf("Upload %q", name), func() error {
		node, err = session.UploadFile(name, parent, "", nil)
		return err
	})
	if node == nil {
		t.Fatalf("Failed to obtain node after upload for %q", name)
	}
	return node, name, md5sum
}

// createDir creates a directory under parent.
func createDir(t *testing.T, session *Mega, name string, parent *Node) (node *Node) {
	var err error
	retry(t, fmt.Sprintf("Create directory %q", name), func() error {
		node, err = session.CreateDir(name, parent)
		return err
	})
	return node
}

func fileMD5(t *testing.T, name string) string {
	file, err := os.Open(name)
	if err != nil {
		t.Fatalf("Failed to open %q: %v", name, err)
	}
	b, err := ioutil.ReadAll(file)
	if err != nil {
		t.Fatalf("Failed to read all %q: %v", name, err)
	}
	h := md5.New()
	_, err = h.Write(b)
	if err != nil {
		t.Fatalf("Error on hash in fileMD5: %v", err)
	}
	return fmt.Sprintf("%x", h.Sum(nil))
}

func TestLogin(t *testing.T) {
	m := New()
	retry(t, "Login", func() error {
		return m.Login(USER, PASSWORD)
	})
}

func TestGetUser(t *testing.T) {
	session := initSession(t)
	_, err := session.GetUser()
	if err != nil {
		t.Fatal("GetUser failed", err)
	}
}

func TestUploadDownload(t *testing.T) {
	session := initSession(t)
	node, name, h1 := uploadFile(t, session, 314573, session.FS.root)

	session.FS.mutex.Lock()
	phash := session.FS.root.hash
	n := session.FS.lookup[node.hash]
	if n.parent.hash != phash {
		t.Error("Parent of uploaded file mismatch")
	}
	session.FS.mutex.Unlock()

	err := session.DownloadFile(node, name, nil)
	if err != nil {
		t.Fatal("Download failed", err)
	}

	h2 := fileMD5(t, name)
	err = os.Remove(name)
	if err != nil {
		t.Error("Failed to remove file", err)
	}

	if h1 != h2 {
		t.Error("MD5 mismatch for downloaded file")
	}
}

func TestMove(t *testing.T) {
	session := initSession(t)
	node, _, _ := uploadFile(t, session, 31, session.FS.root)

	hash := node.hash
	phash := session.FS.trash.hash
	err := session.Move(node, session.FS.trash)
	if err != nil {
		t.Fatal("Move failed", err)
	}

	session.FS.mutex.Lock()
	n := session.FS.lookup[hash]
	if n.parent.hash != phash {
		t.Error("Move happened to wrong parent", phash, n.parent.hash)
	}
	session.FS.mutex.Unlock()
}

func TestRename(t *testing.T) {
	session := initSession(t)
	node, _, _ := uploadFile(t, session, 31, session.FS.root)

	err := session.Rename(node, "newname.txt")
	if err != nil {
		t.Fatal("Rename failed", err)
	}

	session.FS.mutex.Lock()
	newname := session.FS.lookup[node.hash].name
	if newname != "newname.txt" {
		t.Error("Renamed to wrong name", newname)
	}
	session.FS.mutex.Unlock()
}

func TestDelete(t *testing.T) {
	session := initSession(t)
	node, _, _ := uploadFile(t, session, 31, session.FS.root)

	retry(t, "Soft delete", func() error {
		return session.Delete(node, false)
	})

	session.FS.mutex.Lock()
	node = session.FS.lookup[node.hash]
	if node.parent != session.FS.trash {
		t.Error("Expected file to be moved to trash")
	}
	session.FS.mutex.Unlock()

	retry(t, "Hard delete", func() error {
		return session.Delete(node, true)
	})

	time.Sleep(1 * time.Second) // wait for the event

	session.FS.mutex.Lock()
	if _, ok := session.FS.lookup[node.hash]; ok {
		t.Error("Expected file to have disappeared")
	}
	session.FS.mutex.Unlock()
}

func TestCreateDir(t *testing.T) {
	session := initSession(t)
	node := createDir(t, session, "testdir1", session.FS.root)
	node2 := createDir(t, session, "testdir2", node)

	session.FS.mutex.Lock()
	nnode2 := session.FS.lookup[node2.hash]
	if nnode2.parent.hash != node.hash {
		t.Error("Wrong directory parent")
	}
	session.FS.mutex.Unlock()
}

func TestConfig(t *testing.T) {
	m := New()
	m.SetAPIUrl("http://invalid.domain")
	err := m.Login(USER, PASSWORD)
	if err == nil {
		t.Error("API Url: Expected failure")
	}

	err = m.SetDownloadWorkers(100)
	if err != EWORKER_LIMIT_EXCEEDED {
		t.Error("Download: Expected EWORKER_LIMIT_EXCEEDED error")
	}

	err = m.SetUploadWorkers(100)
	if err != EWORKER_LIMIT_EXCEEDED {
		t.Error("Upload: Expected EWORKER_LIMIT_EXCEEDED error")
	}

	// TODO: Add timeout test cases
}

func TestPathLookup(t *testing.T) {
	session := initSession(t)

	rs, err := randString(5)
	if err != nil {
		t.Fatalf("failed to make random string: %v", err)
	}

	node1 := createDir(t, session, "dir-1-"+rs, session.FS.root)
	node21 := createDir(t, session, "dir-2-1-"+rs, node1)
	node22 := createDir(t, session, "dir-2-2-"+rs, node1)
	node31 := createDir(t, session, "dir-3-1-"+rs, node21)
	node32 := createDir(t, session, "dir-3-2-"+rs, node22)
	_ = node32

	_, name1, _ := uploadFile(t, session, 31, node31)
	_, _, _ = uploadFile(t, session, 31, node31)
	_, name3, _ := uploadFile(t, session, 31, node22)

	testpaths := [][]string{
		{"dir-1-" + rs, "dir-2-2-" + rs, path.Base(name3)},
		{"dir-1-" + rs, "dir-2-1-" + rs, "dir-3-1-" + rs},
		{"dir-1-" + rs, "dir-2-1-" + rs, "dir-3-1-" + rs, path.Base(name1)},
		{"dir-1-" + rs, "dir-2-1-" + rs, "none"},
	}

	results := []error{nil, nil, nil, ENOENT}

	for i, tst := range testpaths {
		ns, e := session.FS.PathLookup(session.FS.root, tst)
		switch {
		case e != results[i]:
			t.Errorf("Test %d failed: wrong result", i)
		default:
			if results[i] == nil && len(tst) != len(ns) {
				t.Errorf("Test %d failed: result array len (%d) mismatch", i, len(ns))
			}
			arr := []string{}
			for n := range ns {
				if tst[n] != ns[n].name {
					t.Errorf("Test %d failed: result node mismatches (%v) and (%v)", i, tst, arr)
					break
				}
				arr = append(arr, tst[n])
			}
		}
	}
}

func TestEventNotify(t *testing.T) {
	session1 := initSession(t)
	session2 := initSession(t)

	node, _, _ := uploadFile(t, session1, 31, session1.FS.root)

	for i := 0; i < 60; i++ {
		time.Sleep(time.Second * 1)
		node = session2.FS.HashLookup(node.GetHash())
		if node != nil {
			break
		}
	}

	if node == nil {
		t.Fatal("Expected file to be found in second client's FS")
	}

	retry(t, "Delete", func() error {
		return session2.Delete(node, true)
	})

	time.Sleep(time.Second * 5)

	node = session1.FS.HashLookup(node.hash)
	if node != nil {
		t.Fatal("Expected file to be absent from first client's FS")
	}
}

func TestExportLink(t *testing.T) {
	session := initSession(t)
	node, _, _ := uploadFile(t, session, 31, session.FS.root)

	// Don't include decryption key
	retry(t, "Export link (key not included)", func() error {
		_, err := session.Link(node, false)
		return err
	})

	// Do include decryption key
	retry(t, "Export link (key included)", func() error {
		_, err := session.Link(node, true)
		return err
	})
}

200
vendor/github.com/t3rm1n4l/go-mega/messages.go generated vendored Normal file

@@ -0,0 +1,200 @@
package mega

import "encoding/json"

type LoginMsg struct {
	Cmd    string `json:"a"`
	User   string `json:"user"`
	Handle string `json:"uh"`
}

type LoginResp struct {
	Csid  string `json:"csid"`
	Privk string `json:"privk"`
	Key   string `json:"k"`
}

type UserMsg struct {
	Cmd string `json:"a"`
}

type UserResp struct {
	U     string `json:"u"`
	S     int    `json:"s"`
	Email string `json:"email"`
	Name  string `json:"name"`
	Key   string `json:"k"`
	C     int    `json:"c"`
	Pubk  string `json:"pubk"`
	Privk string `json:"privk"`
	Terms string `json:"terms"`
	TS    string `json:"ts"`
}

type QuotaMsg struct {
	// Action, should be "uq" for quota request
	Cmd string `json:"a"`
	// xfer should be 1
	Xfer int `json:"xfer"`
	// Without strg=1 only reports total capacity for account
	Strg int `json:"strg,omitempty"`
}

type QuotaResp struct {
	// Mstrg is total capacity in bytes
	Mstrg uint64 `json:"mstrg"`
	// Cstrg is used capacity in bytes
	Cstrg uint64 `json:"cstrg"`
	// Per folder usage in bytes?
	Cstrgn map[string][]int64 `json:"cstrgn"`
}
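// Illustration (added here, not in the upstream file): given the struct tags
// above, an account quota query marshals as shown below; the reply values are
// invented and the node handle is a placeholder.
//
//	b, _ := json.Marshal(QuotaMsg{Cmd: "uq", Xfer: 1, Strg: 1})
//	// string(b) == `{"a":"uq","xfer":1,"strg":1}`
//	//
//	// example server reply, decoded into QuotaResp:
//	// {"mstrg":53687091200,"cstrg":1048576,"cstrgn":{"<node handle>":[1048576,1,1]}}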
type FilesMsg struct {
	Cmd string `json:"a"`
	C   int    `json:"c"`
}

type FSNode struct {
	Hash   string `json:"h"`
	Parent string `json:"p"`
	User   string `json:"u"`
	T      int    `json:"t"`
	Attr   string `json:"a"`
	Key    string `json:"k"`
	Ts     int64  `json:"ts"`
	SUser  string `json:"su"`
	SKey   string `json:"sk"`
	Sz     int64  `json:"s"`
}

type FilesResp struct {
	F []FSNode `json:"f"`

	Ok []struct {
		Hash string `json:"h"`
		Key  string `json:"k"`
	} `json:"ok"`

	S []struct {
		Hash string `json:"h"`
		User string `json:"u"`
	} `json:"s"`

	User []struct {
		User  string `json:"u"`
		C     int    `json:"c"`
		Email string `json:"m"`
	} `json:"u"`

	Sn string `json:"sn"`
}

type FileAttr struct {
	Name string `json:"n"`
}

type GetLinkMsg struct {
	Cmd string `json:"a"`
	N   string `json:"n"`
}

type DownloadMsg struct {
	Cmd string `json:"a"`
	G   int    `json:"g"`
	P   string `json:"p,omitempty"`
	N   string `json:"n,omitempty"`
}

type DownloadResp struct {
	G    string `json:"g"`
	Size uint64 `json:"s"`
	Attr string `json:"at"`
	Err  uint32 `json:"e"`
}

type UploadMsg struct {
	Cmd string `json:"a"`
	S   int64  `json:"s"`
}

type UploadResp struct {
	P string `json:"p"`
}

type UploadCompleteMsg struct {
	Cmd string `json:"a"`
	T   string `json:"t"`
	N   [1]struct {
		H string `json:"h"`
		T int    `json:"t"`
		A string `json:"a"`
		K string `json:"k"`
	} `json:"n"`
	I string `json:"i,omitempty"`
}

type UploadCompleteResp struct {
	F []FSNode `json:"f"`
}

type FileInfoMsg struct {
	Cmd string `json:"a"`
	F   int    `json:"f"`
	P   string `json:"p"`
}

type MoveFileMsg struct {
	Cmd string `json:"a"`
	N   string `json:"n"`
	T   string `json:"t"`
	I   string `json:"i"`
}

type FileAttrMsg struct {
	Cmd  string `json:"a"`
	Attr string `json:"attr"`
	Key  string `json:"key"`
	N    string `json:"n"`
	I    string `json:"i"`
}

type FileDeleteMsg struct {
	Cmd string `json:"a"`
	N   string `json:"n"`
	I   string `json:"i"`
}

// GenericEvent is a generic event for parsing the Cmd type before
// decoding more specifically
type GenericEvent struct {
	Cmd string `json:"a"`
}

// FSEvent - event for various file system events
//
// Delete (a=d)
// Update attr (a=u)
// New nodes (a=t)
type FSEvent struct {
	Cmd string `json:"a"`

	T struct {
		Files []FSNode `json:"f"`
	} `json:"t"`
	Owner string `json:"ou"`

	N    string `json:"n"`
	User string `json:"u"`
	Attr string `json:"at"`
	Key  string `json:"k"`
	Ts   int64  `json:"ts"`
	I    string `json:"i"`
}

// Events is received from a poll of the server to read the events
//
// Each event can be an error message or a different field so we delay
// decoding
type Events struct {
	W  string            `json:"w"`
	Sn string            `json:"sn"`
	E  []json.RawMessage `json:"a"`
}

329
vendor/github.com/t3rm1n4l/go-mega/utils.go generated vendored Normal file

@@ -0,0 +1,329 @@
package mega

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"encoding/base64"
	"encoding/binary"
	"encoding/json"
	"errors"
	"math/big"
	"net"
	"net/http"
	"strings"
	"time"
)

func newHttpClient(timeout time.Duration) *http.Client {
	// TODO: Need to test this out
	// Doesn't seem to work as expected
	c := &http.Client{
		Transport: &http.Transport{
			Dial: func(netw, addr string) (net.Conn, error) {
				c, err := net.DialTimeout(netw, addr, timeout)
				if err != nil {
					return nil, err
				}
				return c, nil
			},
			Proxy: http.ProxyFromEnvironment,
		},
	}
	return c
}

// bytes_to_a32 converts the byte slice b to a uint32 slice, treating
// the bytes as big-endian.
func bytes_to_a32(b []byte) []uint32 {
	length := len(b) + 3
	a := make([]uint32, length/4)
	buf := bytes.NewBuffer(b)
	for i := range a {
		_ = binary.Read(buf, binary.BigEndian, &a[i])
	}
	return a
}

// a32_to_bytes converts the uint32 slice a to a byte slice where each
// uint32 is encoded in big-endian order.
func a32_to_bytes(a []uint32) []byte {
	buf := new(bytes.Buffer)
	buf.Grow(len(a) * 4) // To prevent reallocations in Write
	for _, v := range a {
		_ = binary.Write(buf, binary.BigEndian, v)
	}
	return buf.Bytes()
}

// base64urlencode encodes byte slice b using base64 url encoding.
// It removes `=` padding when necessary.
func base64urlencode(b []byte) []byte {
	enc := base64.URLEncoding
	encSize := enc.EncodedLen(len(b))
	buf := make([]byte, encSize)
	enc.Encode(buf, b)
	paddSize := 3 - len(b)%3
	if paddSize < 3 {
		encSize -= paddSize
		buf = buf[:encSize]
	}
	return buf
}

// base64urldecode decodes the byte slice b using base64 url decoding.
// It adds the required '=' padding before decoding.
func base64urldecode(b []byte) []byte {
	enc := base64.URLEncoding
	padSize := 4 - len(b)%4
	switch padSize {
	case 1:
		b = append(b, '=')
	case 2:
		b = append(b, '=', '=')
	}
	decSize := enc.DecodedLen(len(b))
	buf := make([]byte, decSize)
	n, _ := enc.Decode(buf, b)
	return buf[:n]
}

// base64_to_a32 converts the base64 encoded byte slice b to a uint32 slice.
func base64_to_a32(b []byte) []uint32 {
	return bytes_to_a32(base64urldecode(b))
}

// a32_to_base64 converts the uint32 slice a to a base64 encoded byte slice.
func a32_to_base64(a []uint32) []byte {
	return base64urlencode(a32_to_bytes(a))
}

// paddnull pads byte slice b such that the size of the resulting byte
// slice is a multiple of q.
func paddnull(b []byte, q int) []byte {
	if rem := len(b) % q; rem != 0 {
		l := q - rem
		for i := 0; i < l; i++ {
			b = append(b, 0)
		}
	}
	return b
}

// password_key calculates the password hash from the user password.
func password_key(p string) []byte {
	a := bytes_to_a32(paddnull([]byte(p), 4))
	pkey := a32_to_bytes([]uint32{0x93C467E3, 0x7DB0C7A4, 0xD1BE3F81, 0x0152CB56})
	n := (len(a) + 3) / 4
	ciphers := make([]cipher.Block, n)
	for j := 0; j < len(a); j += 4 {
		key := []uint32{0, 0, 0, 0}
		for k := 0; k < 4; k++ {
			if j+k < len(a) {
				key[k] = a[k+j]
			}
		}
		ciphers[j/4], _ = aes.NewCipher(a32_to_bytes(key)) // Uses AES in ECB mode
	}
	for i := 65536; i > 0; i-- {
		for j := 0; j < n; j++ {
			ciphers[j].Encrypt(pkey, pkey)
		}
	}
	return pkey
}

// stringhash computes a generic string hash. It uses k as the key for
// the AES cipher.
func stringhash(s string, k []byte) []byte {
	a := bytes_to_a32(paddnull([]byte(s), 4))
	h := []uint32{0, 0, 0, 0}
	for i, v := range a {
		h[i&3] ^= v
	}
	hb := a32_to_bytes(h)
	cipher, _ := aes.NewCipher(k)
	for i := 16384; i > 0; i-- {
		cipher.Encrypt(hb, hb)
	}
	ha := bytes_to_a32(paddnull(hb, 4))
	return a32_to_base64([]uint32{ha[0], ha[2]})
}

// getMPI returns the length-encoded big.Int and the remaining slice.
func getMPI(b []byte) (*big.Int, []byte) {
	p := new(big.Int)
	plen := (uint64(b[0])*256 + uint64(b[1]) + 7) >> 3
	p.SetBytes(b[2 : plen+2])
	b = b[plen+2:]
	return p, b
}

// getRSAKey decodes the RSA key from the byte slice b.
func getRSAKey(b []byte) (*big.Int, *big.Int, *big.Int) {
	p, b := getMPI(b)
	q, b := getMPI(b)
	d, _ := getMPI(b)
	return p, q, d
}

// decryptRSA decrypts message m using the RSA private key (p, q, d).
func decryptRSA(m, p, q, d *big.Int) []byte {
	n := new(big.Int)
	r := new(big.Int)
	n.Mul(p, q)
	r.Exp(m, d, n)
	return r.Bytes()
}

// blockDecrypt decrypts using the block cipher blk in ECB mode.
func blockDecrypt(blk cipher.Block, dst, src []byte) error {
	if len(src) > len(dst) || len(src)%blk.BlockSize() != 0 {
		return errors.New("Block decryption failed")
	}
	l := len(src) - blk.BlockSize()
	for i := 0; i <= l; i += blk.BlockSize() {
		blk.Decrypt(dst[i:], src[i:])
	}
	return nil
}

// blockEncrypt encrypts using the block cipher blk in ECB mode.
func blockEncrypt(blk cipher.Block, dst, src []byte) error {
	if len(src) > len(dst) || len(src)%blk.BlockSize() != 0 {
		return errors.New("Block encryption failed")
	}
	l := len(src) - blk.BlockSize()
	for i := 0; i <= l; i += blk.BlockSize() {
		blk.Encrypt(dst[i:], src[i:])
	}
	return nil
}

// decryptSessionId decrypts the session id using the given private
// key.
func decryptSessionId(privk []byte, csid []byte, mk []byte) ([]byte, error) {
	block, _ := aes.NewCipher(mk)
	pk := base64urldecode(privk)
	err := blockDecrypt(block, pk, pk)
	if err != nil {
		return nil, err
	}

	c := base64urldecode(csid)
	m, _ := getMPI(c)

	p, q, d := getRSAKey(pk)
	r := decryptRSA(m, p, q, d)

	return base64urlencode(r[:43]), nil
}

// chunkSize describes the size and position of a chunk.
type chunkSize struct {
	position int64
	size     int
}

// getChunkSizes splits size into chunks: the first eight chunks grow from
// 128 KiB to 1 MiB in 128 KiB steps, subsequent chunks are 1 MiB, and the
// final chunk is truncated to the remaining size.
func getChunkSizes(size int64) (chunks []chunkSize) {
	p := int64(0)
	for i := 1; size > 0; i++ {
		var chunk int
		if i <= 8 {
			chunk = i * 131072
		} else {
			chunk = 1048576
		}
		if size < int64(chunk) {
			chunk = int(size)
		}
		chunks = append(chunks, chunkSize{position: p, size: chunk})
		p += int64(chunk)
		size -= int64(chunk)
	}
	return chunks
}

func decryptAttr(key []byte, data []byte) (attr FileAttr, err error) {
	err = EBADATTR
	block, err := aes.NewCipher(key)
	if err != nil {
		return attr, err
	}
	iv := a32_to_bytes([]uint32{0, 0, 0, 0})
	mode := cipher.NewCBCDecrypter(block, iv)
	buf := make([]byte, len(data))
	mode.CryptBlocks(buf, base64urldecode([]byte(data)))
	if string(buf[:4]) == "MEGA" {
		str := strings.TrimRight(string(buf[4:]), "\x00")
		err = json.Unmarshal([]byte(str), &attr)
	}
	return attr, err
}

func encryptAttr(key []byte, attr FileAttr) (b []byte, err error) {
	err = EBADATTR
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	data, err := json.Marshal(attr)
	if err != nil {
		return nil, err
	}
	attrib := []byte("MEGA")
	attrib = append(attrib, data...)
	attrib = paddnull(attrib, 16)
	iv := a32_to_bytes([]uint32{0, 0, 0, 0})
	mode := cipher.NewCBCEncrypter(block, iv)
	mode.CryptBlocks(attrib, attrib)
	b = base64urlencode(attrib)
	return b, nil
}

func randString(l int) (string, error) {
	encoding := "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789AB"
	b := make([]byte, l)
	_, err := rand.Read(b)
	if err != nil {
		return "", err
	}
	enc := base64.NewEncoding(encoding)
	d := make([]byte, enc.EncodedLen(len(b)))
	enc.Encode(d, b)
	d = d[:l]
	return string(d), nil
}

105
vendor/github.com/t3rm1n4l/go-mega/utils_test.go generated vendored Normal file

@@ -0,0 +1,105 @@
package mega

import (
	"reflect"
	"testing"
)

func TestGetChunkSizes(t *testing.T) {
	const k = 1024
	for _, test := range []struct {
		size int64
		want []chunkSize
	}{
		{
			size: 0,
			want: []chunkSize(nil),
		},
		{
			size: 1,
			want: []chunkSize{
				{0, 1},
			},
		},
		{
			size: 128*k - 1,
			want: []chunkSize{
				{0, 128*k - 1},
			},
		},
		{
			size: 128 * k,
			want: []chunkSize{
				{0, 128 * k},
			},
		},
		{
			size: 128*k + 1,
			want: []chunkSize{
				{0, 128 * k},
				{128 * k, 1},
			},
		},
		{
			size: 384*k - 1,
			want: []chunkSize{
				{0, 128 * k},
				{128 * k, 256*k - 1},
			},
		},
		{
			size: 384 * k,
			want: []chunkSize{
				{0, 128 * k},
				{128 * k, 256 * k},
			},
		},
		{
			size: 384*k + 1,
			want: []chunkSize{
				{0, 128 * k},
				{128 * k, 256 * k},
				{384 * k, 1},
			},
		},
		{
			size: 5 * k * k,
			want: []chunkSize{
				{0, 128 * k},
				{128 * k, 256 * k},
				{384 * k, 384 * k},
				{768 * k, 512 * k},
				{1280 * k, 640 * k},
				{1920 * k, 768 * k},
				{2688 * k, 896 * k},
				{3584 * k, 1024 * k},
				{4608 * k, 512 * k},
			},
		},
		{
			size: 10 * k * k,
			want: []chunkSize{
				{0, 128 * k},
				{128 * k, 256 * k},
				{384 * k, 384 * k},
				{768 * k, 512 * k},
				{1280 * k, 640 * k},
				{1920 * k, 768 * k},
				{2688 * k, 896 * k},
				{3584 * k, 1024 * k},
				{4608 * k, 1024 * k},
				{5632 * k, 1024 * k},
				{6656 * k, 1024 * k},
				{7680 * k, 1024 * k},
				{8704 * k, 1024 * k},
				{9728 * k, 512 * k},
			},
		},
	} {
		got := getChunkSizes(test.size)
		if !reflect.DeepEqual(test.want, got) {
			t.Errorf("incorrect chunks for size %d: want %#v, got %#v", test.size, test.want, got)
		}
	}
}