Merge remote-tracking branch 'fw42/mixed_caps'

Alexander Neumann 2015-04-25 10:59:09 +02:00
commit 19486987b0
13 changed files with 76 additions and 76 deletions
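
The changes below are purely mechanical renames from snake_case identifiers to Go's MixedCaps convention, which is why the addition and deletion counts match exactly. As a minimal, self-contained illustration of the convention (this sketch is not code from the repository; the formatBytes body here is invented for the example):

package main

import "fmt"

// Unexported names use lowerCamelCase (startClient, formatBytes, fillExtra);
// exported names keep a leading capital (Open, Create). Underscores are left
// to things like OS-specific file names, not identifiers.
func formatBytes(c uint64) string {
    switch {
    case c >= 1<<20:
        return fmt.Sprintf("%.2fMiB", float64(c)/(1<<20))
    case c >= 1<<10:
        return fmt.Sprintf("%.2fKiB", float64(c)/(1<<10))
    default:
        return fmt.Sprintf("%dB", c)
    }
}

func main() {
    fmt.Println(formatBytes(1536)) // 1.50KiB
}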

View file

@@ -32,7 +32,7 @@ type SFTP struct {
     cmd *exec.Cmd
 }

-func start_client(program string, args ...string) (*SFTP, error) {
+func startClient(program string, args ...string) (*SFTP, error) {
     // Connect to a remote host and request the sftp subsystem via the 'ssh'
     // command. This assumes that passwordless login is correctly configured.
     cmd := exec.Command(program, args...)
@@ -68,7 +68,7 @@ func start_client(program string, args ...string) (*SFTP, error) {
 // exec.Command, it is expected to speak sftp on stdin/stdout. The backend
 // is expected at the given path.
 func Open(dir string, program string, args ...string) (*SFTP, error) {
-    sftp, err := start_client(program, args...)
+    sftp, err := startClient(program, args...)
     if err != nil {
         return nil, err
     }
@@ -141,7 +141,7 @@ func Open(dir string, program string, args ...string) (*SFTP, error) {
 // Create creates all the necessary files and directories for a new sftp
 // backend at dir.
 func Create(dir string, program string, args ...string) (*SFTP, error) {
-    sftp, err := start_client(program, args...)
+    sftp, err := startClient(program, args...)
     if err != nil {
         return nil, err
     }
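
For orientation, Open and Create both go through startClient, which spawns the given program via exec.Command and speaks sftp over its stdin/stdout. A hedged usage sketch; the import path, remote host, repository path, and the exact ssh arguments are assumptions for illustration, not taken from this commit:

package main

import (
    "log"

    "github.com/restic/restic/backend/sftp"
)

func main() {
    // Ask ssh for the sftp subsystem and open the backend at the given
    // remote directory; passwordless login is assumed, as documented above.
    be, err := sftp.Open("/srv/restic-repo", "ssh", "user@example.com", "-s", "sftp")
    if err != nil {
        log.Fatal(err)
    }
    _ = be // the backend would be used by the repository layer from here on
}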

View file

@@ -54,9 +54,9 @@ func (c Chunk) Reader(r io.ReaderAt) io.Reader {
 // A chunker internally holds everything needed to split content.
 type Chunker struct {
-    pol       Pol
-    pol_shift uint
-    tables    *tables
+    pol      Pol
+    polShift uint
+    tables   *tables

     rd     io.Reader
     closed bool
@@ -93,8 +93,8 @@ func New(rd io.Reader, p Pol, bufsize int, hash hash.Hash) *Chunker {
 // polynomial and reader.
 func (c *Chunker) Reset(rd io.Reader, p Pol) {
     c.pol = p
-    c.pol_shift = uint(p.Deg() - 8)
-    c.fill_tables()
+    c.polShift = uint(p.Deg() - 8)
+    c.fillTables()
     c.rd = rd

     for i := 0; i < WindowSize; i++ {
@@ -121,7 +121,7 @@ func (c *Chunker) Reset(rd io.Reader, p Pol) {

 // Calculate out_table and mod_table for optimization. Must be called only
 // once. This implementation uses a cache in the global variable cache.
-func (c *Chunker) fill_tables() {
+func (c *Chunker) fillTables() {
     // if polynomial hasn't been specified, do not compute anything for now
     if c.pol == 0 {
         return
@@ -153,9 +153,9 @@ func (c *Chunker) fill_tables() {
     for b := 0; b < 256; b++ {
         var h Pol

-        h = append_byte(h, byte(b), c.pol)
+        h = appendByte(h, byte(b), c.pol)
         for i := 0; i < WindowSize-1; i++ {
-            h = append_byte(h, 0, c.pol)
+            h = appendByte(h, 0, c.pol)
         }
         c.tables.out[b] = h
     }
@@ -249,7 +249,7 @@ func (c *Chunker) Next() (*Chunk, error) {
         c.wpos = (c.wpos + 1) % WindowSize

         // c.append(b)
-        index := c.digest >> c.pol_shift
+        index := c.digest >> c.polShift
         c.digest <<= 8
         c.digest |= uint64(b)

@@ -319,7 +319,7 @@ func (c *Chunker) hashDigest() []byte {
 }

 func (c *Chunker) append(b byte) {
-    index := c.digest >> c.pol_shift
+    index := c.digest >> c.polShift
     c.digest <<= 8
     c.digest |= uint64(b)

@@ -335,7 +335,7 @@ func (c *Chunker) slide(b byte) {
     c.append(b)
 }

-func append_byte(hash Pol, b byte, pol Pol) Pol {
+func appendByte(hash Pol, b byte, pol Pol) Pol {
     hash <<= 8
     hash |= Pol(b)

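
As background for the tests that follow, the Chunker is driven by calling Next until the input is exhausted. A hedged usage sketch; the import path, the polynomial constant, the io.EOF termination and the Start/Length field names are assumptions, only New, Reset and Next appear in this diff:

package main

import (
    "bytes"
    "crypto/sha256"
    "fmt"
    "io"

    "github.com/restic/restic/chunker"
)

func main() {
    data := bytes.Repeat([]byte{0x42}, 4*1024*1024)

    // New takes a reader, an irreducible polynomial, a buffer size and an
    // optional hash; Reset (shown above) reuses the Chunker with a new reader.
    ch := chunker.New(bytes.NewReader(data), chunker.Pol(0x3DA3358B4DC173), 512*1024, sha256.New())

    for {
        c, err := ch.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            panic(err)
        }
        fmt.Printf("chunk at %d, length %d\n", c.Start, c.Length)
    }
}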

View file

@@ -79,7 +79,7 @@ var chunks2 = []chunk{
     chunk{chunker.MinSize, 0, parseDigest("07854d2fef297a06ba81685e660c332de36d5d18d546927d30daad6d7fda1541")},
 }

-func test_with_data(t *testing.T, chnker *chunker.Chunker, testChunks []chunk) []*chunker.Chunk {
+func testWithData(t *testing.T, chnker *chunker.Chunker, testChunks []chunk) []*chunker.Chunk {
     chunks := []*chunker.Chunk{}
     pos := uint(0)
@@ -133,7 +133,7 @@ func test_with_data(t *testing.T, chnker *chunker.Chunker, testChunks []chunk) []*chunker.Chunk {
     return chunks
 }

-func get_random(seed, count int) []byte {
+func getRandom(seed, count int) []byte {
     buf := make([]byte, count)

     rnd := rand.New(rand.NewSource(23))
@@ -150,9 +150,9 @@ func get_random(seed, count int) []byte {

 func TestChunker(t *testing.T) {
     // setup data source
-    buf := get_random(23, 32*1024*1024)
+    buf := getRandom(23, 32*1024*1024)
     ch := chunker.New(bytes.NewReader(buf), testPol, *testBufSize, sha256.New())
-    chunks := test_with_data(t, ch, chunks1)
+    chunks := testWithData(t, ch, chunks1)

     // test reader
     for i, c := range chunks {
@@ -180,12 +180,12 @@ func TestChunker(t *testing.T) {
     buf = bytes.Repeat([]byte{0}, len(chunks2)*chunker.MinSize)
     ch = chunker.New(bytes.NewReader(buf), testPol, *testBufSize, sha256.New())
-    test_with_data(t, ch, chunks2)
+    testWithData(t, ch, chunks2)
 }

 func TestChunkerWithRandomPolynomial(t *testing.T) {
     // setup data source
-    buf := get_random(23, 32*1024*1024)
+    buf := getRandom(23, 32*1024*1024)

     // generate a new random polynomial
     start := time.Now()
@@ -210,9 +210,9 @@ func TestChunkerWithRandomPolynomial(t *testing.T) {

 func TestChunkerWithoutHash(t *testing.T) {
     // setup data source
-    buf := get_random(23, 32*1024*1024)
+    buf := getRandom(23, 32*1024*1024)
     ch := chunker.New(bytes.NewReader(buf), testPol, *testBufSize, nil)
-    chunks := test_with_data(t, ch, chunks1)
+    chunks := testWithData(t, ch, chunks1)

     // test reader
     for i, c := range chunks {
@@ -243,17 +243,17 @@ func TestChunkerWithoutHash(t *testing.T) {
     buf = bytes.Repeat([]byte{0}, len(chunks2)*chunker.MinSize)
     ch = chunker.New(bytes.NewReader(buf), testPol, *testBufSize, sha256.New())
-    test_with_data(t, ch, chunks2)
+    testWithData(t, ch, chunks2)
 }

 func TestChunkerReuse(t *testing.T) {
     // test multiple uses of the same chunker
     ch := chunker.New(nil, testPol, *testBufSize, sha256.New())
-    buf := get_random(23, 32*1024*1024)
+    buf := getRandom(23, 32*1024*1024)

     for i := 0; i < 4; i++ {
         ch.Reset(bytes.NewReader(buf), testPol)
-        test_with_data(t, ch, chunks1)
+        testWithData(t, ch, chunks1)
     }
 }
@@ -281,7 +281,7 @@ func benchmarkChunker(b *testing.B, hash hash.Hash) {
         rd = f
     } else {
         size = 10 * 1024 * 1024
-        rd = bytes.NewReader(get_random(23, size))
+        rd = bytes.NewReader(getRandom(23, size))
     }

     ch := chunker.New(rd, testPol, *testBufSize, hash)

View file

@@ -28,8 +28,8 @@ func rndRd(bytes int) io.Reader {
     return io.LimitReader(urnd, int64(bytes))
 }

-func create_dir(target string, depth int) {
-    fmt.Printf("create_dir %s, depth %d\n", target, depth)
+func createDir(target string, depth int) {
+    fmt.Printf("createDir %s, depth %d\n", target, depth)
     err := os.Mkdir(target, 0755)
     if err != nil && !os.IsExist(err) {
         panic(err)
@@ -54,7 +54,7 @@ func create_dir(target string, depth int) {
                 panic(err)
             }
         } else {
-            create_dir(filepath.Join(target, fmt.Sprintf("dir%d", i)), depth-1)
+            createDir(filepath.Join(target, fmt.Sprintf("dir%d", i)), depth-1)
         }
     }
 }
@@ -65,5 +65,5 @@ func main() {
         os.Exit(1)
     }

-    create_dir(os.Args[1], MaxDepth)
+    createDir(os.Args[1], MaxDepth)
 }

View file

@@ -27,7 +27,7 @@ func init() {
     }
 }

-func format_bytes(c uint64) string {
+func formatBytes(c uint64) string {
     b := float64(c)

     switch {
@@ -44,7 +44,7 @@ func format_bytes(c uint64) string {
     }
 }

-func format_seconds(sec uint64) string {
+func formatSeconds(sec uint64) string {
     hours := sec / 3600
     sec -= hours * 3600
     min := sec / 60
@@ -56,16 +56,16 @@ func format_seconds(sec uint64) string {
     return fmt.Sprintf("%d:%02d", min, sec)
 }

-func format_duration(d time.Duration) string {
+func formatDuration(d time.Duration) string {
     sec := uint64(d / time.Second)
-    return format_seconds(sec)
+    return formatSeconds(sec)
 }

-func print_tree2(indent int, t *restic.Tree) {
+func printTree2(indent int, t *restic.Tree) {
     for _, node := range t.Nodes {
         if node.Tree() != nil {
             fmt.Printf("%s%s/\n", strings.Repeat(" ", indent), node.Name)
-            print_tree2(indent+1, node.Tree())
+            printTree2(indent+1, node.Tree())
         } else {
             fmt.Printf("%s%s\n", strings.Repeat(" ", indent), node.Name)
         }
@@ -87,10 +87,10 @@ func newCacheRefreshProgress() *restic.Progress {
     }

     p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
-        fmt.Printf("\x1b[2K[%s] %d trees loaded\r", format_duration(d), s.Trees)
+        fmt.Printf("\x1b[2K[%s] %d trees loaded\r", formatDuration(d), s.Trees)
     }
     p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
-        fmt.Printf("\x1b[2Krefreshed cache in %s\n", format_duration(d))
+        fmt.Printf("\x1b[2Krefreshed cache in %s\n", formatDuration(d))
     }

     return p
@@ -103,10 +103,10 @@ func newScanProgress() *restic.Progress {
     p := restic.NewProgress(time.Second)

     p.OnUpdate = func(s restic.Stat, d time.Duration, ticker bool) {
-        fmt.Printf("\x1b[2K[%s] %d directories, %d files, %s\r", format_duration(d), s.Dirs, s.Files, format_bytes(s.Bytes))
+        fmt.Printf("\x1b[2K[%s] %d directories, %d files, %s\r", formatDuration(d), s.Dirs, s.Files, formatBytes(s.Bytes))
     }
     p.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
-        fmt.Printf("\x1b[2Kscanned %d directories, %d files in %s\n", s.Dirs, s.Files, format_duration(d))
+        fmt.Printf("\x1b[2Kscanned %d directories, %d files in %s\n", s.Dirs, s.Files, formatDuration(d))
     }

     return p
@@ -140,12 +140,12 @@ func newArchiveProgress(todo restic.Stat) *restic.Progress {
         }

         status1 := fmt.Sprintf("[%s] %3.2f%% %s/s %s / %s %d / %d items ",
-            format_duration(d),
+            formatDuration(d),
             percent,
-            format_bytes(bps),
-            format_bytes(s.Bytes), format_bytes(todo.Bytes),
+            formatBytes(bps),
+            formatBytes(s.Bytes), formatBytes(todo.Bytes),
             itemsDone, itemsTodo)
-        status2 := fmt.Sprintf("ETA %s ", format_seconds(eta))
+        status2 := fmt.Sprintf("ETA %s ", formatSeconds(eta))

         w, _, err := terminal.GetSize(int(os.Stdout.Fd()))
         if err == nil {
@@ -161,7 +161,7 @@ func newArchiveProgress(todo restic.Stat) *restic.Progress {

     archiveProgress.OnDone = func(s restic.Stat, d time.Duration, ticker bool) {
         sec := uint64(d / time.Second)
         fmt.Printf("\nduration: %s, %.2fMiB/s\n",
-            format_duration(d),
+            formatDuration(d),
             float64(todo.Bytes)/float64(sec)/(1<<20))
     }
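
The renames above do not change behavior; as a quick sanity check of the formatSeconds arithmetic, here is a self-contained sketch. Only the minutes branch is visible in the hunk above, so the hours format string is an assumption:

package main

import "fmt"

func formatSeconds(sec uint64) string {
    hours := sec / 3600
    sec -= hours * 3600
    min := sec / 60
    sec -= min * 60
    if hours > 0 {
        // assumed format for the hours branch, which this diff does not show
        return fmt.Sprintf("%d:%02d:%02d", hours, min, sec)
    }
    return fmt.Sprintf("%d:%02d", min, sec)
}

func main() {
    fmt.Println(formatSeconds(125))  // 2:05
    fmt.Println(formatSeconds(3700)) // 1:01:40
}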

View file

@@ -161,7 +161,7 @@ func fsckTree(opts CmdFsck, s restic.Server, blob restic.Blob) error {
     return firstErr
 }

-func fsck_snapshot(opts CmdFsck, s restic.Server, id backend.ID) error {
+func fsckSnapshot(opts CmdFsck, s restic.Server, id backend.ID) error {
     debug.Log("restic.fsck", "checking snapshot %v\n", id)

     sn, err := restic.LoadSnapshot(s, id)
@@ -211,7 +211,7 @@ func (cmd CmdFsck) Execute(args []string) error {
                 fmt.Fprintf(os.Stderr, "invalid snapshot id %v\n", name)
             }

-            err = fsck_snapshot(cmd, s, id)
+            err = fsckSnapshot(cmd, s, id)
             if err != nil {
                 fmt.Fprintf(os.Stderr, "check for snapshot %v failed\n", id)
             }
@@ -235,7 +235,7 @@ func (cmd CmdFsck) Execute(args []string) error {
             continue
         }

-        err = fsck_snapshot(cmd, s, id)
+        err = fsckSnapshot(cmd, s, id)
         if err != nil {
             fmt.Fprintf(os.Stderr, "check for snapshot %v failed\n", id)
             firstErr = err

View file

@@ -21,7 +21,7 @@ func init() {
     }
 }

-func list_keys(s restic.Server) error {
+func listKeys(s restic.Server) error {
     tab := NewTable()
     tab.Header = fmt.Sprintf(" %-10s %-10s %-10s %s", "ID", "User", "Host", "Created")
     tab.RowFormat = "%s%-10s %-10s %-10s %s"
@@ -56,7 +56,7 @@ func list_keys(s restic.Server) error {
     return nil
 }

-func add_key(s restic.Server) error {
+func addKey(s restic.Server) error {
     pw := readPassword("RESTIC_NEWPASSWORD", "enter password for new key: ")
     pw2 := readPassword("RESTIC_NEWPASSWORD", "enter password again: ")

@@ -74,7 +74,7 @@ func add_key(s restic.Server) error {
     return nil
 }

-func delete_key(s restic.Server, name string) error {
+func deleteKey(s restic.Server, name string) error {
     if name == s.Key().Name() {
         return errors.New("refusing to remove key currently used to access repository")
     }
@@ -88,7 +88,7 @@ func delete_key(s restic.Server, name string) error {
     return nil
 }

-func change_password(s restic.Server) error {
+func changePassword(s restic.Server) error {
     pw := readPassword("RESTIC_NEWPASSWORD", "enter password for new key: ")
     pw2 := readPassword("RESTIC_NEWPASSWORD", "enter password again: ")

@@ -129,18 +129,18 @@ func (cmd CmdKey) Execute(args []string) error {

     switch args[0] {
     case "list":
-        return list_keys(s)
+        return listKeys(s)
     case "add":
-        return add_key(s)
+        return addKey(s)
     case "rm":
         id, err := backend.Find(s, backend.Key, args[1])
         if err != nil {
             return err
         }

-        return delete_key(s, id)
+        return deleteKey(s, id)
     case "change":
-        return change_password(s)
+        return changePassword(s)
     }

     return nil

View file

@@ -21,7 +21,7 @@ func init() {
     }
 }

-func print_node(prefix string, n *restic.Node) string {
+func printNode(prefix string, n *restic.Node) string {
     switch n.Type {
     case "file":
         return fmt.Sprintf("%s %5d %5d %6d %s %s",
@@ -37,14 +37,14 @@ func print_node(prefix string, n *restic.Node) string {
     }
 }

-func print_tree(prefix string, s restic.Server, blob restic.Blob) error {
+func printTree(prefix string, s restic.Server, blob restic.Blob) error {
     tree, err := restic.LoadTree(s, blob)
     if err != nil {
         return err
     }

     for _, entry := range tree.Nodes {
-        fmt.Println(print_node(prefix, entry))
+        fmt.Println(printNode(prefix, entry))

         if entry.Type == "dir" && entry.Subtree != nil {
             b, err := tree.Map.FindID(entry.Subtree)
@@ -52,7 +52,7 @@ func print_tree(prefix string, s restic.Server, blob restic.Blob) error {
                 return err
             }

-            err = print_tree(filepath.Join(prefix, entry.Name), s, b)
+            err = printTree(filepath.Join(prefix, entry.Name), s, b)
             if err != nil {
                 return err
             }
@@ -93,5 +93,5 @@ func (cmd CmdLs) Execute(args []string) error {

     fmt.Printf("snapshot of %v at %s:\n", sn.Paths, sn.Time)

-    return print_tree("", s, sn.Tree)
+    return printTree("", s, sn.Tree)
 }

View file

@@ -60,12 +60,12 @@ func TestPoly1305(t *testing.T) {
     }
 }

-var test_values = []struct {
-    ekey         EncryptionKey
-    skey         SigningKey
-    ciphertext   []byte
-    plaintext    []byte
-    should_panic bool
+var testValues = []struct {
+    ekey        EncryptionKey
+    skey        SigningKey
+    ciphertext  []byte
+    plaintext   []byte
+    shouldPanic bool
 }{
     {
         ekey: EncryptionKey([...]byte{0x30, 0x3e, 0x86, 0x87, 0xb1, 0xd7, 0xdb, 0x18, 0x42, 0x1b, 0xdc, 0x6b, 0xb8, 0x58, 0x8c, 0xca,
@@ -74,21 +74,21 @@ var test_values = []struct {
             K: [...]byte{0xef, 0x4d, 0x88, 0x24, 0xcb, 0x80, 0xb2, 0xbc, 0xc5, 0xfb, 0xff, 0x8a, 0x9b, 0x12, 0xa4, 0x2c},
             R: [...]byte{0xcc, 0x8d, 0x4b, 0x94, 0x8e, 0xe0, 0xeb, 0xfe, 0x1d, 0x41, 0x5d, 0xe9, 0x21, 0xd1, 0x03, 0x53},
         },
-        ciphertext: decode_hex("69fb41c62d12def4593bd71757138606338f621aeaeb39da0fe4f99233f8037a54ea63338a813bcf3f75d8c3cc75dddf8750"),
+        ciphertext: decodeHex("69fb41c62d12def4593bd71757138606338f621aeaeb39da0fe4f99233f8037a54ea63338a813bcf3f75d8c3cc75dddf8750"),
         plaintext:  []byte("Dies ist ein Test!"),
     },
 }

-func decode_hex(s string) []byte {
+func decodeHex(s string) []byte {
     d, _ := hex.DecodeString(s)

     return d
 }

 // returns true if function called panic
-func should_panic(f func()) (did_panic bool) {
+func shouldPanic(f func()) (didPanic bool) {
     defer func() {
         if r := recover(); r != nil {
-            did_panic = true
+            didPanic = true
         }
     }()
@@ -99,7 +99,7 @@ func should_panic(f func()) (did_panic bool) {
 func TestCrypto(t *testing.T) {
     msg := make([]byte, 0, 8*1024*1024) // use 8MiB for now

-    for _, tv := range test_values {
+    for _, tv := range testValues {
         // test encryption
         k := &Key{
             Encrypt: tv.ekey,
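
The shouldPanic helper turns a recovered panic into a boolean result. A minimal, self-contained usage sketch; the call it guards in the real test lies outside this hunk, so a plain panic stands in:

package main

import "fmt"

// shouldPanic mirrors the helper above: the deferred recover flips the named
// return value if f panics.
func shouldPanic(f func()) (didPanic bool) {
    defer func() {
        if r := recover(); r != nil {
            didPanic = true
        }
    }()
    f()
    return
}

func main() {
    fmt.Println(shouldPanic(func() { panic("boom") })) // true
    fmt.Println(shouldPanic(func() {}))                // false
}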

View file

@@ -70,7 +70,7 @@ func NodeFromFileInfo(path string, fi os.FileInfo) (*Node, error) {
         node.Size = uint64(fi.Size())
     }

-    err := node.fill_extra(path, fi)
+    err := node.fillExtra(path, fi)

     return node, err
 }

View file

@@ -15,7 +15,7 @@ func (node *Node) OpenForReading() (*os.File, error) {
     return os.Open(n.path)
 }

-func (node *Node) fill_extra(path string, fi os.FileInfo) (err error) {
+func (node *Node) fillExtra(path string, fi os.FileInfo) (err error) {
     stat, ok := fi.Sys().(*syscall.Stat_t)
     if !ok {
         return

View file

@@ -15,7 +15,7 @@ func (node *Node) OpenForReading() (*os.File, error) {
     return os.OpenFile(node.path, os.O_RDONLY|syscall.O_NOATIME, 0)
 }

-func (node *Node) fill_extra(path string, fi os.FileInfo) error {
+func (node *Node) fillExtra(path string, fi os.FileInfo) error {
     stat, ok := fi.Sys().(*syscall.Stat_t)
     if !ok {
         return nil

View file

@@ -6,7 +6,7 @@ func (node *Node) OpenForReading() (*os.File, error) {
     return os.Open(n.path)
 }

-func (node *Node) fill_extra(path string, fi os.FileInfo) error {
+func (node *Node) fillExtra(path string, fi os.FileInfo) error {
     return nil
 }
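
The last three hunks are per-platform implementations of the same fillExtra method; Go picks one at build time via OS-specific file name suffixes (or build tags), so each platform compiles exactly one definition. A hedged sketch of that pattern with hypothetical file, package, and function signatures, not restic's actual files:

// extra_linux.go (hypothetical name): the _linux suffix limits this file to GOOS=linux.
package nodeinfo

import "syscall"

func fillExtra(sys interface{}) (uid, gid uint32, ok bool) {
    st, ok := sys.(*syscall.Stat_t) // Stat_t carries Uid and Gid on Linux
    if !ok {
        return 0, 0, false
    }
    return st.Uid, st.Gid, true
}

// extra_windows.go (hypothetical name): compiled only for GOOS=windows, where no
// *syscall.Stat_t with uid/gid exists, so it mirrors the empty stub in the last hunk.
package nodeinfo

func fillExtra(sys interface{}) (uid, gid uint32, ok bool) {
    return 0, 0, false
}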