chore: update dns providers libs. (#877)

Ludovic Fernandez 2019-06-24 20:08:55 +02:00 committed by GitHub
parent 83618fed79
commit ac65f6c6a9
180 changed files with 6675 additions and 5000 deletions

Gopkg.lock (generated)

@@ -63,7 +63,7 @@
   revision = "a3fa4a771d87bda2514a90a157e1fed1b6897d2e"

 [[projects]]
-  digest = "1:47071ecf8d840dd357ede1b2aed46576bdd0a866adecef3c9e85a00db9672202"
+  digest = "1:dddee1f9ce7caecc95ae089c721a65ed42b35f8cc8a8b8a3ee6e3758ec93ec4b"
   name = "github.com/akamai/AkamaiOPEN-edgegrid-golang"
   packages = [
     "client-v1",
@@ -72,8 +72,8 @@
     "jsonhooks-v1",
   ]
   pruneopts = "NUT"
-  revision = "1471ce9c14c6d8c007516e129262962a628fecdf"
-  version = "v0.7.3"
+  revision = "009960c8b2c7c57a0c5c488a3c8c778c16f3f586"
+  version = "v0.7.4"

 [[projects]]
   digest = "1:823e87ae25170339e2bfd1d6f7c2e27554c6bb5655f91c67b37bd5be45bb6b32"
@@ -168,12 +168,12 @@
   version = "v1.1.1"

 [[projects]]
-  branch = "master"
-  digest = "1:981e5cf3c23056668c447439e90d3a441358eb68e5189b1952c909772383b944"
+  digest = "1:fa62421bd924623ac10a160686cc55d529f7274b2caedf7d2c607d14bc50c118"
   name = "github.com/decker502/dnspod-go"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "83a3ba562b048c9fc88229408e593494b7774684"
+  revision = "71fbbdbdf1a7eeac949586de15bf96d416d3dd63"
+  version = "v0.2.0"

 [[projects]]
   digest = "1:7a6852b35eb5bbc184561443762d225116ae630c26a7c4d90546619f1e7d2ad2"
@@ -192,20 +192,20 @@
   revision = "5448fe645cb1964ba70ac8f9f2ffe975e61a536c"

 [[projects]]
-  digest = "1:e856fc44ab196970612bdc8c15e65ccf92ed8d4ccb3a2e65b88dc240a2fe5d0b"
+  digest = "1:5ec1feea329f68f736c43e81a218c18e4e83779fb96d118becb97a9f2f7d6fc4"
   name = "github.com/dnsimple/dnsimple-go"
   packages = ["dnsimple"]
   pruneopts = "NUT"
-  revision = "f5ead9c20763fd925dea1362f2af5d671ed2a459"
-  version = "v0.21.0"
+  revision = "8f70b647443816578a776f6c6d3e84deb11e1731"
+  version = "v0.23.0"

 [[projects]]
-  digest = "1:9013e4c7a6ff077dcb478aa75524aa73b5763136efb4efe8949b18d9b90dd212"
+  digest = "1:e73fd806b49d0e5c5d81776f8b33e1c35918cdd6c02dea4f1c2d91e70c8aa378"
   name = "github.com/exoscale/egoscale"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "4acc53d7aa0960f007acf1daedef69a6d24d8d1f"
-  version = "v0.14.0"
+  revision = "4e527724b8225e8315d580accd1c4e860202d41b"
+  version = "v0.17.1"

 [[projects]]
   digest = "1:aa3ed0a71c4e66e4ae6486bf97a3f4cab28edc78df2e50c5ad01dc7d91604b88"
@@ -232,12 +232,12 @@
   version = "v1.38.2"

 [[projects]]
-  digest = "1:ed6fe3cdb5ccb17387b5cb11c0cbd125699f5f5d23626ccf78a7c5859183e063"
-  name = "github.com/go-resty/resty"
+  digest = "1:bde9f189072512ba353f3641d4839cb4c9c7edf421e467f2c03f267b402bd16c"
+  name = "github.com/gofrs/uuid"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "97a15579492cd5f35632499f315d7a8df94160a1"
-  version = "v1.8.0"
+  revision = "6b08a5c5172ba18946672b49749cde22873dd7c2"
+  version = "v3.2.0"

 [[projects]]
   digest = "1:63ccdfbd20f7ccd2399d0647a7d100b122f79c13bb83da9660b1598396fd9f62"
@@ -357,20 +357,20 @@
   revision = "8b16b4848295edda07b9a828e5a3b285c25c2b9c"

 [[projects]]
-  digest = "1:111ff5a09a32895248270bfaef9b8b6ac163a8cde9cdd603fed64b3e4b59e8ab"
+  digest = "1:a0181b662584429828ccdd68d4dbe09ba7ba7414ef01d54b96f152002802ed72"
   name = "github.com/linode/linodego"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "d0d31d8ca62fa3f7e4526ca0ce95de81e4ed001e"
-  version = "v0.5.1"
+  revision = "42d84d42f3f28fe1fe1823c58c11715241eda24e"
+  version = "v0.7.1"

 [[projects]]
-  digest = "1:6676c63cef61a47c84eae578bcd8fe8352908ccfe3ea663c16797617a29e3c44"
+  digest = "1:bcc0ec1552cdcd95ee76052a01273eb35917df74825420b3e3df12c3b8d5e6ed"
   name = "github.com/miekg/dns"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "a220737569d8137d4c610f80bd33f1dc762522e5"
-  version = "v1.1.0"
+  revision = "8aa92d4e02c501ba21e26fb92cf2fb9f23f56917"
+  version = "v1.1.9"

 [[projects]]
   digest = "1:a4df73029d2c42fabcb6b41e327d2f87e685284ec03edf76921c267d9cfc9c23"
@@ -429,23 +429,23 @@
   version = "v0.6.0"

 [[projects]]
-  digest = "1:c665ac9fd8cedb7bea85380a441dec4e5de3c7d4574e18c6ba53dfe04cf50878"
+  digest = "1:50ba4638005d50396250b601811becb6b11ab13b0b7fa8ccbe119e21ce08a4fa"
   name = "github.com/oracle/oci-go-sdk"
   packages = [
     "common",
     "dns",
   ]
   pruneopts = "NUT"
-  revision = "f1ecb80f81a9cfa7dbfc964a34a3c62323277a9d"
-  version = "v4.0.0"
+  revision = "9ed9700756ebdedc96eaefcccf9322afbd4009ef"
+  version = "v5.4.0"

 [[projects]]
   branch = "master"
-  digest = "1:02584222c11b07d1d61cd76cc73e78cbc72810384a7ea69c543246d60d3335f7"
+  digest = "1:f703c1f05c29329026c4f49bd6c71442537e1f964d3a4f310982702a293f28c6"
   name = "github.com/ovh/go-ovh"
   packages = ["ovh"]
   pruneopts = "NUT"
-  revision = "c3e61035ea66f5c637719c90140da4e3ac3b1bf0"
+  revision = "ba5adb4cf0148a3dbdbd30586f075266256a77b1"

 [[projects]]
   digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121"
@@ -472,7 +472,7 @@
   revision = "1031fa0ce2f20c1c0e1e1b51951d8ea02c84fa05"

 [[projects]]
-  digest = "1:5a632dc3d841d803524bd442976a52e390e4294826ece65722cb77020879d156"
+  digest = "1:253f275bd72c42f8d234712d1574c8b222fe9b72838bfaca11b21ace9c0e3d0a"
   name = "github.com/sacloud/libsacloud"
   packages = [
     ".",
@@ -482,8 +482,8 @@
     "utils/mutexkv",
   ]
   pruneopts = "NUT"
-  revision = "a949b57af53e809207587f8c41571d81f140276e"
-  version = "v1.19.0"
+  revision = "41c392dee98a83260abbe0fcd5c13beb7c75d103"
+  version = "v1.21.1"

 [[projects]]
   digest = "1:6bc0652ea6e39e22ccd522458b8bdd8665bf23bdc5a20eec90056e4dc7e273ca"
@@ -534,8 +534,7 @@
   revision = "37e84520dcf74488f67654f9c775b9752c232dc1"

 [[projects]]
-  branch = "master"
-  digest = "1:3b236e8930d31aeb375fe405c15c2afc581e04bd6cb68da4723e1aa8d2e2da37"
+  digest = "1:21ef4b12d6b6c6dfe389caa82f859d1e35eeb24ce61c60d09db57addb6a95781"
   name = "github.com/transip/gotransip"
   packages = [
     ".",
@@ -543,7 +542,8 @@
     "util",
   ]
   pruneopts = "NUT"
-  revision = "1dc93a7db3567a5ccf865106afac88278ba940cf"
+  revision = "c6e2ce0bbb4a601a909e3b7a773358d6b503e663"
+  version = "v5.8.2"

 [[projects]]
   digest = "1:5dba68a1600a235630e208cb7196b24e58fcbb77bb7a6bec08fcd23f081b0a58"
@@ -578,6 +578,17 @@
   revision = "8930459677fde1e11e3e1b50bbed1acc850b5665"
   version = "v0.20.1"

+[[projects]]
+  branch = "master"
+  digest = "1:02fe59517e10f9b400b500af8ac228c74cecb0cba7a5f438d8283edb97e14270"
+  name = "go.uber.org/ratelimit"
+  packages = [
+    ".",
+    "internal/clock",
+  ]
+  pruneopts = "NUT"
+  revision = "c15da02342779cb6dc027fc95ee2277787698f36"
+
 [[projects]]
   branch = "master"
   digest = "1:af471e1efb6318085c2c27ba0bcbf659feab5f7044388c0c0104f456831eb281"
@@ -769,7 +780,7 @@
 [[projects]]
   branch = "v2"
-  digest = "1:f15af196d07dbb28a0633599eba0fb872acd0b24426001520dd6cb9944c7ad1a"
+  digest = "1:96d26da4d68299289c40ad10cd588648472fdc96ebbcdac82c04738363ee02fa"
   name = "gopkg.in/ns1/ns1-go.v2"
   packages = [
     "rest",
@@ -780,7 +791,15 @@
     "rest/model/monitor",
   ]
   pruneopts = "NUT"
-  revision = "028658c6d9be774b6d103a923d8c4b2715135c3f"
+  revision = "6c599e5e57901a8e58e1729f444de1edeb77bf97"
+
+[[projects]]
+  digest = "1:0942599d1f614d9ca4dfe052db1f60d4547b1b581206006be352f629a8b37d8d"
+  name = "gopkg.in/resty.v1"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "fa5875c0caa5c260ab78acec5a244215a730247f"
+  version = "v1.12.0"

 [[projects]]
   digest = "1:a50fabe7a46692dc7c656310add3d517abe7914df02afd151ef84da884605dc8"

Gopkg.toml

@@ -29,38 +29,6 @@
   go-tests = true
   unused-packages = true

-[[constraint]]
-  branch = "master"
-  name = "github.com/decker502/dnspod-go"
-
-[[constraint]]
-  version = "0.21.0"
-  name = "github.com/dnsimple/dnsimple-go"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/namedotcom/go"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/ovh/go-ovh"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/rainycape/memcache"
-
-[[constraint]]
-  branch = "master"
-  name = "github.com/timewasted/linode"
-
-[[constraint]]
-  version = "0.6.0"
-  name = "github.com/nrdcg/goinwx"
-
-[[constraint]]
-  version = "0.5.1"
-  name = "github.com/linode/linodego"
-
 [[constraint]]
   branch = "master"
   name = "golang.org/x/crypto"
@@ -77,35 +45,66 @@
   branch = "master"
   name = "google.golang.org/api"

+[[constraint]]
+  version = "v1.1.9"
+  name = "github.com/miekg/dns"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/rainycape/memcache"
+
+[[constraint]]
+  version = "0.2.0"
+  name = "github.com/decker502/dnspod-go"
+
+[[constraint]]
+  version = "0.23.0"
+  name = "github.com/dnsimple/dnsimple-go"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/namedotcom/go"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/ovh/go-ovh"
+
+[[constraint]]
+  branch = "master"
+  name = "github.com/timewasted/linode"
+
+[[constraint]]
+  version = "0.6.0"
+  name = "github.com/nrdcg/goinwx"
+
+[[constraint]]
+  version = "0.7.1"
+  name = "github.com/linode/linodego"
+
 [[constraint]]
   branch = "v2"
   name = "gopkg.in/ns1/ns1-go.v2"

 [[constraint]]
-  version = "1.19.0"
+  version = "1.21.1"
   name = "github.com/sacloud/libsacloud"

 [[constraint]]
-  branch = "master"
+  version = "v5.8.2"
   name = "github.com/transip/gotransip"

 [[constraint]]
-  version = "0.14.0"
+  version = "0.17.1"
   name = "github.com/exoscale/egoscale"

 [[constraint]]
-  version = "v1.1.0"
-  name = "github.com/miekg/dns"
-
-[[constraint]]
-  version = "v0.7.3"
+  version = "v0.7.4"
   name = "github.com/akamai/AkamaiOPEN-edgegrid-golang"

 [[constraint]]
-  version = "4.0.0"
+  version = "5.4.0"
   name = "github.com/oracle/oci-go-sdk"

 [[constraint]]
   name = "github.com/labbsr0x/bindman-dns-webhook"
   version = "1.0.0"


@@ -137,9 +137,8 @@ func (d *DNSProvider) getHostedZone(domain string) (string, string, error) {
        }
    }

-   if hostedZone.ID == 0 {
+   if hostedZone.ID == "" || hostedZone.ID == "0" {
        return "", "", fmt.Errorf("zone %s not found in dnspod for domain %s", authZone, domain)
    }

    return fmt.Sprintf("%v", hostedZone.ID), hostedZone.Name, nil


@@ -9,6 +9,7 @@ import (
    "fmt"
    "io/ioutil"
    "net/http"
+   "os"
    "sort"
    "strings"
    "time"
@@ -32,6 +33,26 @@ func AddRequestHeader(config Config, req *http.Request) *http.Request {
        req.Header.Set("Content-Type", "application/json")
    }

+   _, AkamaiCliEnvOK := os.LookupEnv("AKAMAI_CLI")
+   AkamaiCliVersionEnv, AkamaiCliVersionEnvOK := os.LookupEnv("AKAMAI_CLI_VERSION")
+   AkamaiCliCommandEnv, AkamaiCliCommandEnvOK := os.LookupEnv("AKAMAI_CLI_COMMAND")
+   AkamaiCliCommandVersionEnv, AkamaiCliCommandVersionEnvOK := os.LookupEnv("AKAMAI_CLI_COMMAND_VERSION")
+   if AkamaiCliEnvOK && AkamaiCliVersionEnvOK {
+       if req.Header.Get("User-Agent") != "" {
+           req.Header.Set("User-Agent", req.Header.Get("User-Agent")+" AkamaiCLI/"+AkamaiCliVersionEnv)
+       } else {
+           req.Header.Set("User-Agent", "AkamaiCLI/"+AkamaiCliVersionEnv)
+       }
+   }
+   if AkamaiCliCommandEnvOK && AkamaiCliCommandVersionEnvOK {
+       if req.Header.Get("User-Agent") != "" {
+           req.Header.Set("User-Agent", req.Header.Get("User-Agent")+" AkamaiCLI-"+AkamaiCliCommandEnv+"/"+AkamaiCliCommandVersionEnv)
+       } else {
+           req.Header.Set("User-Agent", "AkamaiCLI-"+AkamaiCliCommandEnv+"/"+AkamaiCliCommandVersionEnv)
+       }
+   }
+
    req.Header.Set("Authorization", createAuthHeader(config, req, timestamp, nonce))
    return req
 }
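For orientation, a small standalone sketch (not the library's code) of the User-Agent value those environment variables produce; the variable values and the base User-Agent below are made up.

package main

import (
    "fmt"
    "net/http"
    "os"
)

func main() {
    // Hypothetical values; in practice the Akamai CLI wrapper injects them.
    os.Setenv("AKAMAI_CLI", "1")
    os.Setenv("AKAMAI_CLI_VERSION", "1.1.1")
    os.Setenv("AKAMAI_CLI_COMMAND", "dns")
    os.Setenv("AKAMAI_CLI_COMMAND_VERSION", "0.5.0")

    req, _ := http.NewRequest(http.MethodGet, "https://example.invalid", nil)
    req.Header.Set("User-Agent", "lego") // placeholder base User-Agent

    // Same composition rule as the hunk above: append "AkamaiCLI/<version>" and
    // "AkamaiCLI-<command>/<command version>" when the variables are present.
    if v, ok := os.LookupEnv("AKAMAI_CLI_VERSION"); ok {
        req.Header.Set("User-Agent", req.Header.Get("User-Agent")+" AkamaiCLI/"+v)
    }
    if c, ok := os.LookupEnv("AKAMAI_CLI_COMMAND"); ok {
        if cv, ok2 := os.LookupEnv("AKAMAI_CLI_COMMAND_VERSION"); ok2 {
            req.Header.Set("User-Agent", req.Header.Get("User-Agent")+" AkamaiCLI-"+c+"/"+cv)
        }
    }

    fmt.Println(req.Header.Get("User-Agent"))
    // Prints: lego AkamaiCLI/1.1.1 AkamaiCLI-dns/0.5.0
}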


@@ -5,31 +5,24 @@ package dnspod
 import (
    // "bytes"
+   "encoding/json"
    "fmt"
    "io"
+   "net"
    "net/http"
    "net/url"
    "strings"
    "time"
-   "unsafe"
-
-   "github.com/json-iterator/go"
 )

-var json = jsoniter.ConfigCompatibleWithStandardLibrary
-
-func init() {
-   jsoniter.RegisterFieldDecoderFunc("dnspod.Domain", "GroupID", func(ptr unsafe.Pointer, iter *jsoniter.Iterator) {
-       *((*string)(ptr)) = iter.ReadAny().ToString()
-   })
-}
-
 const (
    libraryVersion = "0.1"
    baseURL = "https://dnsapi.cn/"
    userAgent = "dnspod-go/" + libraryVersion
    apiVersion = "v1"
+   timeout = 5
+   keepAlive = 30
 )

 // dnspod API docs: https://www.dnspod.cn/docs/info.html
@@ -40,6 +33,8 @@ type CommonParams struct {
    Lang string
    ErrorOnEmpty string
    UserID string
+   Timeout int
+   KeepAlive int
 }

 func newPayLoad(params CommonParams) url.Values {
@@ -59,7 +54,6 @@ func newPayLoad(params CommonParams) url.Values {
    }
    if params.UserID != "" {
        p.Set("user_id", params.UserID)
    }

    return p
@@ -92,7 +86,27 @@ type Client struct {
 // NewClient returns a new dnspod API client.
 func NewClient(CommonParams CommonParams) *Client {
-   c := &Client{HttpClient: &http.Client{}, CommonParams: CommonParams, BaseURL: baseURL, UserAgent: userAgent}
+   var _timeout, _keepalive int
+   _timeout = timeout
+   _keepalive = keepAlive
+
+   if CommonParams.Timeout != 0 {
+       _timeout = CommonParams.Timeout
+   }
+   if CommonParams.KeepAlive != 0 {
+       _keepalive = CommonParams.KeepAlive
+   }
+
+   cli := http.Client{
+       Transport: &http.Transport{
+           Dial: (&net.Dialer{
+               Timeout:   time.Duration(_timeout) * time.Second,
+               KeepAlive: time.Duration(_keepalive) * time.Second,
+           }).Dial,
+       },
+   }
+
+   c := &Client{HttpClient: &cli, CommonParams: CommonParams, BaseURL: baseURL, UserAgent: userAgent}
    c.Domains = &DomainsService{client: c}

    return c
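To make the new knobs concrete, a minimal usage sketch. Only Timeout and KeepAlive (seconds, falling back to 5 and 30 when left at zero) come from the hunk above; the LoginToken and Format fields are assumed CommonParams settings and may differ.

package main

import (
    "fmt"

    dnspod "github.com/decker502/dnspod-go"
)

func main() {
    params := dnspod.CommonParams{
        LoginToken: "123,abcdef", // hypothetical credential, not part of this hunk
        Format:     "json",       // hypothetical, not part of this hunk
        Timeout:    10,           // dial timeout in seconds (default 5)
        KeepAlive:  60,           // TCP keep-alive in seconds (default 30)
    }

    client := dnspod.NewClient(params)
    fmt.Println(client.BaseURL, client.UserAgent)
}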


@@ -1,6 +1,7 @@
 package dnspod

 import (
+   "encoding/json"
    "fmt"
    "strconv"
    // "time"
@@ -30,26 +31,26 @@ type DomainInfo struct {
 }

 type Domain struct {
-   ID int `json:"id,omitempty"`
+   ID json.Number `json:"id,omitempty"`
    Name string `json:"name,omitempty"`
    PunyCode string `json:"punycode,omitempty"`
    Grade string `json:"grade,omitempty"`
    GradeTitle string `json:"grade_title,omitempty"`
    Status string `json:"status,omitempty"`
    ExtStatus string `json:"ext_status,omitempty"`
    Records string `json:"records,omitempty"`
-   GroupID string `json:"group_id,omitempty"`
+   GroupID json.Number `json:"group_id,omitempty"`
    IsMark string `json:"is_mark,omitempty"`
    Remark string `json:"remark,omitempty"`
    IsVIP string `json:"is_vip,omitempty"`
    SearchenginePush string `json:"searchengine_push,omitempty"`
    UserID string `json:"user_id,omitempty"`
    CreatedOn string `json:"created_on,omitempty"`
    UpdatedOn string `json:"updated_on,omitempty"`
    TTL string `json:"ttl,omitempty"`
    CNameSpeedUp string `json:"cname_speedup,omitempty"`
    Owner string `json:"owner,omitempty"`
    AuthToAnquanBao bool `json:"auth_to_anquanbao,omitempty"`
 }

 type domainListWrapper struct {
@@ -113,7 +114,7 @@ func (s *DomainsService) Create(domainAttributes Domain) (Domain, *Response, err
    payload := newPayLoad(s.client.CommonParams)

    payload.Set("domain", domainAttributes.Name)
-   payload.Set("group_id", domainAttributes.GroupID)
+   payload.Set("group_id", domainAttributes.GroupID.String())
    payload.Set("is_mark", domainAttributes.IsMark)

    res, err := s.client.post(path, payload, &returnedDomain)
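A quick standalone illustration of why the ID fields move to json.Number: the dnspod API returns IDs sometimes as JSON numbers and sometimes as JSON strings, and json.Number accepts both encodings. This is also what the `hostedZone.ID == "" || hostedZone.ID == "0"` check in the provider hunk earlier is guarding. The struct below is a cut-down stand-in, not the library type.

package main

import (
    "encoding/json"
    "fmt"
)

// domain mirrors only the two fields touched in the hunk above.
type domain struct {
    ID      json.Number `json:"id,omitempty"`
    GroupID json.Number `json:"group_id,omitempty"`
}

func main() {
    payloads := []string{
        `{"id": 12345, "group_id": 1}`,     // numeric encoding
        `{"id": "12345", "group_id": "1"}`, // string encoding
    }
    for _, p := range payloads {
        var d domain
        if err := json.Unmarshal([]byte(p), &d); err != nil {
            fmt.Println("unmarshal failed:", err)
            continue
        }
        fmt.Println(d.ID.String(), d.GroupID.String())
    }
}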


@@ -23,7 +23,7 @@ const (
    // This is a pro-forma convention given that Go dependencies
    // tends to be fetched directly from the repo.
    // It is also used in the user-agent identify the client.
-   Version = "0.21.0"
+   Version = "0.23.0"

    // defaultBaseURL to the DNSimple production API.
    defaultBaseURL = "https://api.dnsimple.com"


@@ -129,18 +129,10 @@ func (s *DomainsService) DeleteDomain(accountID string, domainIdentifier string)
    return domainResponse, nil
 }

-// ResetDomainToken resets the domain token.
+// DEPRECATED
 //
 // See https://developer.dnsimple.com/v2/domains/#reset-token
 func (s *DomainsService) ResetDomainToken(accountID string, domainIdentifier string) (*domainResponse, error) {
-   path := versioned(domainPath(accountID, domainIdentifier) + "/token")
-   domainResponse := &domainResponse{}
-   resp, err := s.client.post(path, nil, domainResponse)
-   if err != nil {
-       return nil, err
-   }
-   domainResponse.HttpResponse = resp
-   return domainResponse, nil
+   // noop
+   return &domainResponse{}, nil
 }


@@ -14,12 +14,30 @@ type WhoisPrivacy struct {
    UpdatedAt string `json:"updated_at,omitempty"`
 }

+// WhoisPrivacyRenewal represents a whois privacy renewal in DNSimple.
+type WhoisPrivacyRenewal struct {
+   ID int64 `json:"id,omitempty"`
+   DomainID int64 `json:"domain_id,omitempty"`
+   WhoisPrivacyID int64 `json:"whois_privacy_id,omitempty"`
+   State string `json:"string,omitempty"`
+   Enabled bool `json:"enabled,omitempty"`
+   ExpiresOn string `json:"expires_on,omitempty"`
+   CreatedAt string `json:"created_at,omitempty"`
+   UpdatedAt string `json:"updated_at,omitempty"`
+}
+
 // whoisPrivacyResponse represents a response from an API method that returns a WhoisPrivacy struct.
 type whoisPrivacyResponse struct {
    Response
    Data *WhoisPrivacy `json:"data"`
 }

+// whoisPrivacyRenewalResponse represents a response from an API method that returns a WhoisPrivacyRenewal struct.
+type whoisPrivacyRenewalResponse struct {
+   Response
+   Data *WhoisPrivacyRenewal `json:"data"`
+}
+
 // GetWhoisPrivacy gets the whois privacy for the domain.
 //
 // See https://developer.dnsimple.com/v2/registrar/whois-privacy/#get
@@ -67,3 +85,19 @@ func (s *RegistrarService) DisableWhoisPrivacy(accountID string, domainName stri
    privacyResponse.HttpResponse = resp
    return privacyResponse, nil
 }
+
+// RenewWhoisPrivacy renews the whois privacy for the domain.
+//
+// See https://developer.dnsimple.com/v2/registrar/whois-privacy/#renew
+func (s *RegistrarService) RenewWhoisPrivacy(accountID string, domainName string) (*whoisPrivacyRenewalResponse, error) {
+   path := versioned(fmt.Sprintf("/%v/registrar/domains/%v/whois_privacy/renewals", accountID, domainName))
+   privacyRenewalResponse := &whoisPrivacyRenewalResponse{}
+   resp, err := s.client.post(path, nil, privacyRenewalResponse)
+   if err != nil {
+       return nil, err
+   }
+   privacyRenewalResponse.HttpResponse = resp
+   return privacyRenewalResponse, nil
+}
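A caller-side sketch for the new renewal endpoint. It assumes an already-authenticated *dnsimple.Client and that the registrar service is reachable as client.Registrar, as with the other dnsimple-go services.

package dnsimpleexample

import (
    "fmt"

    "github.com/dnsimple/dnsimple-go/dnsimple"
)

// renewPrivacy renews whois privacy for one domain and reports the result.
func renewPrivacy(client *dnsimple.Client, accountID, domain string) error {
    resp, err := client.Registrar.RenewWhoisPrivacy(accountID, domain)
    if err != nil {
        return err
    }
    // Data carries the exported WhoisPrivacyRenewal payload defined in the hunk above.
    fmt.Printf("whois privacy renewal %d for %q is in state %q\n", resp.Data.ID, domain, resp.Data.State)
    return nil
}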


@@ -6,6 +6,17 @@ import (
    "net"
 )

+// Healthcheck represents an Healthcheck attached to an IP
+type Healthcheck struct {
+   Interval int64 `json:"interval,omitempty" doc:"healthcheck definition: time in seconds to wait for each check. Default: 10, minimum: 5"`
+   Mode string `json:"mode,omitempty" doc:"healthcheck definition: healthcheck mode can be either 'tcp' or 'http'"`
+   Path string `json:"path,omitempty" doc:"healthcheck definition: the path against which the 'http' healthcheck will be performed. Required if mode is 'http', ignored otherwise."`
+   Port int64 `json:"port,omitempty" doc:"healthcheck definition: the port against which the healthcheck will be performed. Required if a 'mode' is provided."`
+   StrikesFail int64 `json:"strikes-fail,omitempty" doc:"healthcheck definition: number of times to retry before declaring the healthcheck 'dead'. Default: 3"`
+   StrikesOk int64 `json:"strikes-ok,omitempty" doc:"healthcheck definition: number of times to retry before declaring the healthcheck 'alive'. Default: 2"`
+   Timeout int64 `json:"timeout,omitempty" doc:"healthcheck definition: time in seconds to wait for each check. Default: 2, cannot be greater than interval."`
+}
+
 // IPAddress represents an IP Address
 type IPAddress struct {
    Allocated string `json:"allocated,omitempty" doc:"date the public IP address was acquired"`
@@ -13,6 +24,7 @@ type IPAddress struct {
    AssociatedNetworkID *UUID `json:"associatednetworkid,omitempty" doc:"the ID of the Network associated with the IP address"`
    AssociatedNetworkName string `json:"associatednetworkname,omitempty" doc:"the name of the Network associated with the IP address"`
    ForVirtualNetwork bool `json:"forvirtualnetwork,omitempty" doc:"the virtual network for the IP address"`
+   Healthcheck *Healthcheck `json:"healthcheck,omitempty" doc:"The IP healthcheck configuration"`
    ID *UUID `json:"id,omitempty" doc:"public IP address id"`
    IPAddress net.IP `json:"ipaddress,omitempty" doc:"public IP address"`
    IsElastic bool `json:"iselastic,omitempty" doc:"is an elastic ip"`
@@ -77,10 +89,15 @@ func (ipaddress IPAddress) Delete(ctx context.Context, client *Client) error {
 // AssociateIPAddress (Async) represents the IP creation
 type AssociateIPAddress struct {
-   IsPortable *bool `json:"isportable,omitempty" doc:"should be set to true if public IP is required to be transferable across zones, if not specified defaults to false"`
-   NetworkdID *UUID `json:"networkid,omitempty" doc:"The network this ip address should be associated to."`
-   ZoneID *UUID `json:"zoneid,omitempty" doc:"the ID of the availability zone you want to acquire an public IP address from"`
-   _ bool `name:"associateIpAddress" description:"Acquires and associates a public IP to an account."`
+   HealthcheckInterval int64 `json:"interval,omitempty" doc:"healthcheck definition: time in seconds to wait for each check. Default: 10, minimum: 5"`
+   HealthcheckMode string `json:"mode,omitempty" doc:"healthcheck definition: healthcheck mode can be either 'tcp' or 'http'"`
+   HealthcheckPath string `json:"path,omitempty" doc:"healthcheck definition: the path against which the 'http' healthcheck will be performed. Required if mode is 'http', ignored otherwise."`
+   HealthcheckPort int64 `json:"port,omitempty" doc:"healthcheck definition: the port against which the healthcheck will be performed. Required if a 'mode' is provided."`
+   HealthcheckStrikesFail int64 `json:"strikes-fail,omitempty" doc:"healthcheck definition: number of times to retry before declaring the healthcheck 'dead'. Default: 3"`
+   HealthcheckStrikesOk int64 `json:"strikes-ok,omitempty" doc:"healthcheck definition: number of times to retry before declaring the healthcheck 'alive'. Default: 2"`
+   HealthcheckTimeout int64 `json:"timeout,omitempty" doc:"healthcheck definition: time in seconds to wait for each check. Default: 2, cannot be greater than interval."`
+   ZoneID *UUID `json:"zoneid,omitempty" doc:"the ID of the availability zone you want to acquire a public IP address from"`
+   _ bool `name:"associateIpAddress" description:"Acquires and associates a public IP to an account."`
 }

 // Response returns the struct to unmarshal
@@ -111,9 +128,15 @@ func (DisassociateIPAddress) AsyncResponse() interface{} {
 // UpdateIPAddress (Async) represents the IP modification
 type UpdateIPAddress struct {
-   ID *UUID `json:"id" doc:"the id of the public ip address to update"`
-   CustomID *UUID `json:"customid,omitempty" doc:"an optional field, in case you want to set a custom id to the resource. Allowed to Root Admins only"`
-   _ bool `name:"updateIpAddress" description:"Updates an ip address"`
+   HealthcheckInterval int64 `json:"interval,omitempty" doc:"healthcheck definition: time in seconds to wait for each check. Default: 10, minimum: 5"`
+   HealthcheckMode string `json:"mode,omitempty" doc:"healthcheck definition: healthcheck mode can be either 'tcp' or 'http'"`
+   HealthcheckPath string `json:"path,omitempty" doc:"healthcheck definition: the path against which the 'http' healthcheck will be performed. Required if mode is 'http', ignored otherwise."`
+   HealthcheckPort int64 `json:"port,omitempty" doc:"healthcheck definition: the port against which the healthcheck will be performed. Required if a 'mode' is provided."`
+   HealthcheckStrikesFail int64 `json:"strikes-fail,omitempty" doc:"healthcheck definition: number of times to retry before declaring the healthcheck 'dead'. Default: 3"`
+   HealthcheckStrikesOk int64 `json:"strikes-ok,omitempty" doc:"healthcheck definition: number of times to retry before declaring the healthcheck 'alive'. Default: 2"`
+   HealthcheckTimeout int64 `json:"timeout,omitempty" doc:"healthcheck definition: time in seconds to wait for each check. Default: 2, cannot be greater than interval."`
+   ID *UUID `json:"id" doc:"the id of the public IP address to update"`
+   _ bool `name:"updateIpAddress" description:"Updates an IP address"`
 }

 // Response returns the struct to unmarshal
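For orientation, a sketch of acquiring a managed elastic IP with the new healthcheck fields. The AssociateIPAddress literal comes from the struct above; egoscale.NewClient, RequestWithContext, the endpoint/credentials and the asserted *egoscale.IPAddress response type are assumptions about the usual egoscale calling convention.

package egoscaleexample

import (
    "context"

    "github.com/exoscale/egoscale"
)

// associateManagedIP requests a public IP with an HTTP healthcheck attached.
func associateManagedIP(ctx context.Context, zoneID *egoscale.UUID) (*egoscale.IPAddress, error) {
    client := egoscale.NewClient("https://api.exoscale.com/compute", "EXO-api-key", "api-secret") // placeholders

    resp, err := client.RequestWithContext(ctx, &egoscale.AssociateIPAddress{
        ZoneID:                 zoneID,
        HealthcheckMode:        "http",
        HealthcheckPath:        "/healthz",
        HealthcheckPort:        8080,
        HealthcheckInterval:    10,
        HealthcheckTimeout:     2,
        HealthcheckStrikesOk:   2,
        HealthcheckStrikesFail: 3,
    })
    if err != nil {
        return nil, err
    }

    ip := resp.(*egoscale.IPAddress) // assumed async response type for this command
    return ip, nil
}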


@@ -11,12 +11,12 @@ import (
 // Affinity and Anti-Affinity groups provide a way to influence where VMs should run.
 // See: http://docs.cloudstack.apache.org/projects/cloudstack-administration/en/stable/virtual_machines.html#affinity-groups
 type AffinityGroup struct {
    Account string `json:"account,omitempty" doc:"the account owning the affinity group"`
    Description string `json:"description,omitempty" doc:"the description of the affinity group"`
    ID *UUID `json:"id,omitempty" doc:"the ID of the affinity group"`
    Name string `json:"name,omitempty" doc:"the name of the affinity group"`
    Type string `json:"type,omitempty" doc:"the type of the affinity group"`
-   VirtualMachineIDs []string `json:"virtualmachineIds,omitempty" doc:"virtual machine Ids associated with this affinity group"`
+   VirtualMachineIDs []UUID `json:"virtualmachineIds,omitempty" doc:"virtual machine Ids associated with this affinity group"`
 }

 // ListRequest builds the ListAffinityGroups request


@@ -14,9 +14,6 @@ import (
 // DNSDomain represents a domain
 type DNSDomain struct {
    ID int64 `json:"id"`
-   AccountID int64 `json:"account_id,omitempty"`
-   UserID int64 `json:"user_id,omitempty"`
-   RegistrantID int64 `json:"registrant_id,omitempty"`
    Name string `json:"name"`
    UnicodeName string `json:"unicode_name"`
    Token string `json:"token"`


@@ -8,7 +8,6 @@ import (
    "encoding/json"
    "fmt"
    "io/ioutil"
-   "log"
    "net/http"
    "net/url"
    "strings"
@@ -26,138 +25,6 @@ type RunstatusErrorResponse struct {
 // runstatusPagesURL is the only URL that cannot be guessed
 const runstatusPagesURL = "/pages"
// RunstatusPage runstatus page
type RunstatusPage struct {
Created *time.Time `json:"created,omitempty"`
DarkTheme bool `json:"dark_theme,omitempty"`
Domain string `json:"domain,omitempty"`
GradientEnd string `json:"gradient_end,omitempty"`
GradientStart string `json:"gradient_start,omitempty"`
HeaderBackground string `json:"header_background,omitempty"`
ID int `json:"id,omitempty"`
Incidents []RunstatusIncident `json:"incidents,omitempty"`
IncidentsURL string `json:"incidents_url,omitempty"`
Logo string `json:"logo,omitempty"`
Maintenances []RunstatusMaintenance `json:"maintenances,omitempty"`
MaintenancesURL string `json:"maintenances_url,omitempty"`
Name string `json:"name"` //fake field (used to post a new runstatus page)
OkText string `json:"ok_text,omitempty"`
Plan string `json:"plan,omitempty"`
PublicURL string `json:"public_url,omitempty"`
Services []RunstatusService `json:"services,omitempty"`
ServicesURL string `json:"services_url,omitempty"`
State string `json:"state,omitempty"`
Subdomain string `json:"subdomain"`
SupportEmail string `json:"support_email,omitempty"`
TimeZone string `json:"time_zone,omitempty"`
Title string `json:"title,omitempty"`
TitleColor string `json:"title_color,omitempty"`
TwitterUsername string `json:"twitter_username,omitempty"`
URL string `json:"url,omitempty"`
}
// Match returns true if the other page has got similarities with itself
func (page RunstatusPage) Match(other RunstatusPage) bool {
if other.Subdomain != "" && page.Subdomain == other.Subdomain {
return true
}
if other.ID > 0 && page.ID == other.ID {
return true
}
return false
}
//RunstatusPageList runstatus page list
type RunstatusPageList struct {
Count int `json:"count"`
Next string `json:"next"`
Previous string `json:"previous"`
Results []RunstatusPage `json:"results"`
}
// CreateRunstatusPage create runstatus page
func (client *Client) CreateRunstatusPage(ctx context.Context, page RunstatusPage) (*RunstatusPage, error) {
resp, err := client.runstatusRequest(ctx, client.Endpoint+runstatusPagesURL, page, "POST")
if err != nil {
return nil, err
}
var p *RunstatusPage
if err := json.Unmarshal(resp, &p); err != nil {
return nil, err
}
return p, nil
}
// DeleteRunstatusPage delete runstatus page
func (client *Client) DeleteRunstatusPage(ctx context.Context, page RunstatusPage) error {
if page.URL == "" {
return fmt.Errorf("empty URL for %#v", page)
}
_, err := client.runstatusRequest(ctx, page.URL, nil, "DELETE")
return err
}
// GetRunstatusPage fetches the runstatus page
func (client *Client) GetRunstatusPage(ctx context.Context, page RunstatusPage) (*RunstatusPage, error) {
if page.URL != "" {
return client.getRunstatusPage(ctx, page.URL)
}
ps, err := client.ListRunstatusPages(ctx)
if err != nil {
return nil, err
}
for i := range ps {
if ps[i].Match(page) {
return &ps[i], nil
}
}
return nil, fmt.Errorf("%#v not found", page)
}
func (client *Client) getRunstatusPage(ctx context.Context, pageURL string) (*RunstatusPage, error) {
resp, err := client.runstatusRequest(ctx, pageURL, nil, "GET")
if err != nil {
return nil, err
}
p := new(RunstatusPage)
if err := json.Unmarshal(resp, p); err != nil {
return nil, err
}
// NOTE: fix the missing IDs
for i := range p.Maintenances {
if err := p.Maintenances[i].FakeID(); err != nil {
log.Printf("bad fake ID for %#v, %s", p.Maintenances[i], err)
}
}
return p, nil
}
// ListRunstatusPages list all the runstatus pages
func (client *Client) ListRunstatusPages(ctx context.Context) ([]RunstatusPage, error) {
resp, err := client.runstatusRequest(ctx, client.Endpoint+runstatusPagesURL, nil, "GET")
if err != nil {
return nil, err
}
var p *RunstatusPageList
if err := json.Unmarshal(resp, &p); err != nil {
return nil, err
}
// XXX: handle pagination
return p.Results, nil
}
 // Error formats the DNSerror into a string
 func (req RunstatusErrorResponse) Error() string {
    return fmt.Sprintf("Runstatus error: %s", req.Detail)
@@ -239,7 +106,7 @@ func (client *Client) runstatusRequest(ctx context.Context, uri string, structPa
    contentType := resp.Header.Get("content-type")
    if !strings.Contains(contentType, "application/json") {
-       return nil, fmt.Errorf(`response content-type expected to be "application/json", got %q`, contentType)
+       return nil, fmt.Errorf(`response %d content-type expected to be "application/json", got %q`, resp.StatusCode, contentType)
    }

    b, err := ioutil.ReadAll(resp.Body)


@@ -40,7 +40,9 @@ func (incident RunstatusIncident) Match(other RunstatusIncident) bool {
 //RunstatusIncidentList is a list of incident
 type RunstatusIncidentList struct {
-   Incidents []RunstatusIncident `json:"incidents"`
+   Next string `json:"next"`
+   Previous string `json:"previous"`
+   Incidents []RunstatusIncident `json:"results"`
 }

 // GetRunstatusIncident retrieves the details of a specific incident.
@@ -58,14 +60,10 @@ func (client *Client) GetRunstatusIncident(ctx context.Context, incident Runstat
        return nil, err
    }

-   is, err := client.ListRunstatusIncidents(ctx, *page)
-   if err != nil {
-       return nil, err
-   }
-
-   for i := range is {
-       if is[i].Match(incident) {
-           return &is[i], nil
+   for i := range page.Incidents {
+       j := &page.Incidents[i]
+       if j.Match(incident) {
+           return j, nil
        }
    }

@@ -91,18 +89,51 @@ func (client *Client) ListRunstatusIncidents(ctx context.Context, page Runstatus
        return nil, fmt.Errorf("empty Incidents URL for %#v", page)
    }

-   resp, err := client.runstatusRequest(ctx, page.IncidentsURL, nil, "GET")
-   if err != nil {
-       return nil, err
+   results := make([]RunstatusIncident, 0)
+
+   var err error
+   client.PaginateRunstatusIncidents(ctx, page, func(incident *RunstatusIncident, e error) bool {
+       if e != nil {
+           err = e
+           return false
+       }
+       results = append(results, *incident)
+       return true
+   })
+
+   return results, err
+}
+
+// PaginateRunstatusIncidents paginate Incidents
+func (client *Client) PaginateRunstatusIncidents(ctx context.Context, page RunstatusPage, callback func(*RunstatusIncident, error) bool) {
+   if page.IncidentsURL == "" {
+       callback(nil, fmt.Errorf("empty Incidents URL for %#v", page))
+       return
    }

-   var p *RunstatusIncidentList
-   if err := json.Unmarshal(resp, &p); err != nil {
-       return nil, err
-   }
+   incidentsURL := page.IncidentsURL
+   for incidentsURL != "" {
+       resp, err := client.runstatusRequest(ctx, incidentsURL, nil, "GET")
+       if err != nil {
+           callback(nil, err)
+           return
+       }

-   // NOTE: no pagination
-   return p.Incidents, nil
+       var is *RunstatusIncidentList
+       if err := json.Unmarshal(resp, &is); err != nil {
+           callback(nil, err)
+           return
+       }
+
+       for i := range is.Incidents {
+           if cont := callback(&is.Incidents[i], nil); !cont {
+               return
+           }
+       }
+
+       incidentsURL = is.Next
+   }
 }

 // CreateRunstatusIncident create runstatus incident
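The incident, maintenance and service list helpers now share this callback-driven pagination. A usage sketch for the incident variant, assuming a configured *egoscale.Client and a RunstatusPage fetched beforehand (for example with GetRunstatusPage):

package egoscaleexample

import (
    "context"

    "github.com/exoscale/egoscale"
)

// countIncidents walks every page of incidents through the new callback API.
func countIncidents(ctx context.Context, client *egoscale.Client, page egoscale.RunstatusPage) (int, error) {
    var n int
    var err error

    client.PaginateRunstatusIncidents(ctx, page, func(incident *egoscale.RunstatusIncident, e error) bool {
        if e != nil {
            err = e
            return false // stop paginating on the first error
        }
        n++
        return true // true means continue with the next incident / next page
    })

    return n, err
}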

@@ -65,9 +65,11 @@ func (maintenance *RunstatusMaintenance) FakeID() error {
    return nil
 }

-//RunstatusMaintenanceList is a list of incident
+// RunstatusMaintenanceList is a list of incident
 type RunstatusMaintenanceList struct {
-   Maintenances []RunstatusMaintenance `json:"maintenances"`
+   Next string `json:"next"`
+   Previous string `json:"previous"`
+   Maintenances []RunstatusMaintenance `json:"results"`
 }

 // GetRunstatusMaintenance retrieves the details of a specific maintenance.
@@ -85,14 +87,13 @@ func (client *Client) GetRunstatusMaintenance(ctx context.Context, maintenance R
        return nil, err
    }

-   ms, err := client.ListRunstatusMaintenances(ctx, *page)
-   if err != nil {
-       return nil, err
-   }
-
-   for i := range ms {
-       if ms[i].Match(maintenance) {
-           return &ms[i], nil
+   for i := range page.Maintenances {
+       m := &page.Maintenances[i]
+       if m.Match(maintenance) {
+           if err := m.FakeID(); err != nil {
+               log.Printf("bad fake ID for %#v, %s", m, err)
+           }
+           return m, nil
        }
    }

@@ -118,25 +119,54 @@ func (client *Client) ListRunstatusMaintenances(ctx context.Context, page Runsta
        return nil, fmt.Errorf("empty Maintenances URL for %#v", page)
    }

-   resp, err := client.runstatusRequest(ctx, page.MaintenancesURL, nil, "GET")
-   if err != nil {
-       return nil, err
-   }
+   results := make([]RunstatusMaintenance, 0)

-   var p *RunstatusMaintenanceList
-   if err := json.Unmarshal(resp, &p); err != nil {
-       return nil, err
-   }
+   var err error
+   client.PaginateRunstatusMaintenances(ctx, page, func(maintenance *RunstatusMaintenance, e error) bool {
+       if e != nil {
+           err = e
+           return false
+       }
+       results = append(results, *maintenance)
+       return true
+   })

-   // NOTE: fix the missing IDs
-   for i := range p.Maintenances {
-       if err := p.Maintenances[i].FakeID(); err != nil {
-           log.Printf("bad fake ID for %#v, %s", p.Maintenances[i], err)
-       }
-   }
+   return results, err
+}

-   // NOTE: the list of maintenances doesn't have any pagination
-   return p.Maintenances, nil
+// PaginateRunstatusMaintenances paginate Maintenances
+func (client *Client) PaginateRunstatusMaintenances(ctx context.Context, page RunstatusPage, callback func(*RunstatusMaintenance, error) bool) { // nolint: dupl
+   if page.MaintenancesURL == "" {
+       callback(nil, fmt.Errorf("empty Maintenances URL for %#v", page))
+       return
+   }
+
+   maintenancesURL := page.MaintenancesURL
+   for maintenancesURL != "" {
+       resp, err := client.runstatusRequest(ctx, maintenancesURL, nil, "GET")
+       if err != nil {
+           callback(nil, err)
+           return
+       }
+
+       var ms *RunstatusMaintenanceList
+       if err := json.Unmarshal(resp, &ms); err != nil {
+           callback(nil, err)
+           return
+       }
+
+       for i := range ms.Maintenances {
+           if err := ms.Maintenances[i].FakeID(); err != nil {
+               log.Printf("bad fake ID for %#v, %s", ms.Maintenances[i], err)
+           }
+           if cont := callback(&ms.Maintenances[i], nil); !cont {
+               return
+           }
+       }
+
+       maintenancesURL = ms.Next
+   }
 }

 // CreateRunstatusMaintenance create runstatus Maintenance

vendor/github.com/exoscale/egoscale/runstatus_page.go (new vendored file)

@@ -0,0 +1,168 @@
package egoscale
import (
"context"
"encoding/json"
"fmt"
"log"
"time"
)
// RunstatusPage runstatus page
type RunstatusPage struct {
Created *time.Time `json:"created,omitempty"`
DarkTheme bool `json:"dark_theme,omitempty"`
Domain string `json:"domain,omitempty"`
GradientEnd string `json:"gradient_end,omitempty"`
GradientStart string `json:"gradient_start,omitempty"`
HeaderBackground string `json:"header_background,omitempty"`
ID int `json:"id,omitempty"`
Incidents []RunstatusIncident `json:"incidents,omitempty"`
IncidentsURL string `json:"incidents_url,omitempty"`
Logo string `json:"logo,omitempty"`
Maintenances []RunstatusMaintenance `json:"maintenances,omitempty"`
MaintenancesURL string `json:"maintenances_url,omitempty"`
Name string `json:"name"` //fake field (used to post a new runstatus page)
OkText string `json:"ok_text,omitempty"`
Plan string `json:"plan,omitempty"`
PublicURL string `json:"public_url,omitempty"`
Services []RunstatusService `json:"services,omitempty"`
ServicesURL string `json:"services_url,omitempty"`
State string `json:"state,omitempty"`
Subdomain string `json:"subdomain"`
SupportEmail string `json:"support_email,omitempty"`
TimeZone string `json:"time_zone,omitempty"`
Title string `json:"title,omitempty"`
TitleColor string `json:"title_color,omitempty"`
TwitterUsername string `json:"twitter_username,omitempty"`
URL string `json:"url,omitempty"`
}
// Match returns true if the other page has got similarities with itself
func (page RunstatusPage) Match(other RunstatusPage) bool {
if other.Subdomain != "" && page.Subdomain == other.Subdomain {
return true
}
if other.ID > 0 && page.ID == other.ID {
return true
}
return false
}
// RunstatusPageList runstatus page list
type RunstatusPageList struct {
Next string `json:"next"`
Previous string `json:"previous"`
Pages []RunstatusPage `json:"results"`
}
// CreateRunstatusPage create runstatus page
func (client *Client) CreateRunstatusPage(ctx context.Context, page RunstatusPage) (*RunstatusPage, error) {
resp, err := client.runstatusRequest(ctx, client.Endpoint+runstatusPagesURL, page, "POST")
if err != nil {
return nil, err
}
var p *RunstatusPage
if err := json.Unmarshal(resp, &p); err != nil {
return nil, err
}
return p, nil
}
// DeleteRunstatusPage delete runstatus page
func (client *Client) DeleteRunstatusPage(ctx context.Context, page RunstatusPage) error {
if page.URL == "" {
return fmt.Errorf("empty URL for %#v", page)
}
_, err := client.runstatusRequest(ctx, page.URL, nil, "DELETE")
return err
}
// GetRunstatusPage fetches the runstatus page
func (client *Client) GetRunstatusPage(ctx context.Context, page RunstatusPage) (*RunstatusPage, error) {
if page.URL != "" {
return client.getRunstatusPage(ctx, page.URL)
}
ps, err := client.ListRunstatusPages(ctx)
if err != nil {
return nil, err
}
for i := range ps {
if ps[i].Match(page) {
return client.getRunstatusPage(ctx, ps[i].URL)
}
}
return nil, fmt.Errorf("%#v not found", page)
}
func (client *Client) getRunstatusPage(ctx context.Context, pageURL string) (*RunstatusPage, error) {
resp, err := client.runstatusRequest(ctx, pageURL, nil, "GET")
if err != nil {
return nil, err
}
p := new(RunstatusPage)
if err := json.Unmarshal(resp, p); err != nil {
return nil, err
}
// NOTE: fix the missing IDs
for i := range p.Maintenances {
if err := p.Maintenances[i].FakeID(); err != nil {
log.Printf("bad fake ID for %#v, %s", p.Maintenances[i], err)
}
}
for i := range p.Services {
if err := p.Services[i].FakeID(); err != nil {
log.Printf("bad fake ID for %#v, %s", p.Services[i], err)
}
}
return p, nil
}
// ListRunstatusPages list all the runstatus pages
func (client *Client) ListRunstatusPages(ctx context.Context) ([]RunstatusPage, error) {
resp, err := client.runstatusRequest(ctx, client.Endpoint+runstatusPagesURL, nil, "GET")
if err != nil {
return nil, err
}
var p *RunstatusPageList
if err := json.Unmarshal(resp, &p); err != nil {
return nil, err
}
return p.Pages, nil
}
//PaginateRunstatusPages paginate on runstatus pages
func (client *Client) PaginateRunstatusPages(ctx context.Context, callback func(pages []RunstatusPage, e error) bool) {
pageURL := client.Endpoint + runstatusPagesURL
for pageURL != "" {
resp, err := client.runstatusRequest(ctx, pageURL, nil, "GET")
if err != nil {
callback(nil, err)
return
}
var p *RunstatusPageList
if err := json.Unmarshal(resp, &p); err != nil {
callback(nil, err)
return
}
if ok := callback(p.Pages, nil); ok {
return
}
pageURL = p.Next
}
}


@@ -58,7 +58,9 @@ func (service RunstatusService) Match(other RunstatusService) bool {
 // RunstatusServiceList service list
 type RunstatusServiceList struct {
-   Services []RunstatusService `json:"services"`
+   Next string `json:"next"`
+   Previous string `json:"previous"`
+   Services []RunstatusService `json:"results"`
 }

 // DeleteRunstatusService delete runstatus service
@@ -110,14 +112,13 @@ func (client *Client) GetRunstatusService(ctx context.Context, service Runstatus
        return nil, err
    }

-   ss, err := client.ListRunstatusServices(ctx, *page)
-   if err != nil {
-       return nil, err
-   }
-
-   for i := range ss {
-       if ss[i].Match(service) {
-           return &ss[i], nil
+   for i := range page.Services {
+       s := &page.Services[i]
+       if s.Match(service) {
+           if err := s.FakeID(); err != nil {
+               log.Printf("bad fake ID for %#v, %s", s, err)
+           }
+           return s, nil
        }
    }

@@ -148,23 +149,53 @@ func (client *Client) ListRunstatusServices(ctx context.Context, page RunstatusP
        return nil, fmt.Errorf("empty Services URL for %#v", page)
    }

-   resp, err := client.runstatusRequest(ctx, page.ServicesURL, nil, "GET")
-   if err != nil {
-       return nil, err
-   }
+   results := make([]RunstatusService, 0)

-   var p *RunstatusServiceList
-   if err := json.Unmarshal(resp, &p); err != nil {
-       return nil, err
-   }
+   var err error
+   client.PaginateRunstatusServices(ctx, page, func(service *RunstatusService, e error) bool {
+       if e != nil {
+           err = e
+           return false
+       }
+       results = append(results, *service)
+       return true
+   })

-   // NOTE: fix the missing IDs
-   for i := range p.Services {
-       if err := p.Services[i].FakeID(); err != nil {
-           log.Printf("bad fake ID for %#v, %s", p.Services[i], err)
-       }
-   }
+   return results, err
+}

-   // NOTE: no pagination
-   return p.Services, nil
+// PaginateRunstatusServices paginates Services
+func (client *Client) PaginateRunstatusServices(ctx context.Context, page RunstatusPage, callback func(*RunstatusService, error) bool) { // nolint: dupl
+   if page.ServicesURL == "" {
+       callback(nil, fmt.Errorf("empty Services URL for %#v", page))
+       return
+   }
+
+   servicesURL := page.ServicesURL
+   for servicesURL != "" {
+       resp, err := client.runstatusRequest(ctx, servicesURL, nil, "GET")
+       if err != nil {
+           callback(nil, err)
+           return
+       }
+
+       var ss *RunstatusServiceList
+       if err := json.Unmarshal(resp, &ss); err != nil {
+           callback(nil, err)
+           return
+       }
+
+       for i := range ss.Services {
+           if err := ss.Services[i].FakeID(); err != nil {
+               log.Printf("bad fake ID for %#v, %s", ss.Services[i], err)
+           }
+           if cont := callback(&ss.Services[i], nil); !cont {
+               return
+           }
+       }
+
+       servicesURL = ss.Next
+   }
 }


@@ -13,6 +13,7 @@ type Template struct {
    Format string `json:"format,omitempty" doc:"the format of the template."`
    HostID *UUID `json:"hostid,omitempty" doc:"the ID of the secondary storage host for the template"`
    HostName string `json:"hostname,omitempty" doc:"the name of the secondary storage host for the template"`
+   Hypervisor string `json:"hypervisor,omitempty" doc:"the target hypervisor for the template"`
    ID *UUID `json:"id,omitempty" doc:"the template ID"`
    IsDynamicallyScalable bool `json:"isdynamicallyscalable,omitempty" doc:"true if template contains XS/VMWare tools inorder to support dynamic scaling of VM cpu/memory"`
    IsExtractable bool `json:"isextractable,omitempty" doc:"true if the template is extractable, false otherwise"`
@@ -20,8 +21,10 @@ type Template struct {
    IsPublic bool `json:"ispublic,omitempty" doc:"true if this template is a public template, false otherwise"`
    IsReady bool `json:"isready,omitempty" doc:"true if the template is ready to be deployed from, false otherwise."`
    Name string `json:"name,omitempty" doc:"the template name"`
-   OsTypeID *UUID `json:"ostypeid,omitempty" doc:"the ID of the OS type for this template."`
-   OsTypeName string `json:"ostypename,omitempty" doc:"the name of the OS type for this template."`
+   OsCategoryID *UUID `json:"oscategoryid,omitempty" doc:"the ID of the OS category for this template"`
+   OsCategoryName string `json:"oscategoryname,omitempty" doc:"the name of the OS category for this template"`
+   OsTypeID *UUID `json:"ostypeid,omitempty" doc:"the ID of the OS type for this template"`
+   OsTypeName string `json:"ostypename,omitempty" doc:"the name of the OS type for this template"`
    PasswordEnabled bool `json:"passwordenabled,omitempty" doc:"true if the reset password feature is enabled, false otherwise"`
    Removed string `json:"removed,omitempty" doc:"the date this template was removed"`
    Size int64 `json:"size,omitempty" doc:"the size of the template"`
@@ -118,3 +121,44 @@ type ListOSCategoriesResponse struct {
    Count int `json:"count"`
    OSCategory []OSCategory `json:"oscategory"`
 }
// DeleteTemplate deletes a template by ID
type DeleteTemplate struct {
_ bool `name:"deleteTemplate" description:"Deletes a template"`
ID *UUID `json:"id" doc:"the ID of the template"`
}
// Response returns the struct to unmarshal
func (DeleteTemplate) Response() interface{} {
return new(AsyncJobResult)
}
// AsyncResponse returns the struct to unmarshal the async job
func (DeleteTemplate) AsyncResponse() interface{} {
return new(BooleanResponse)
}
// RegisterCustomTemplate registers a new template
type RegisterCustomTemplate struct {
_ bool `name:"registerCustomTemplate" description:"Register a new template."`
Checksum string `json:"checksum" doc:"the MD5 checksum value of this template"`
Details map[string]string `json:"details,omitempty" doc:"Template details in key/value pairs"`
Displaytext string `json:"displaytext" doc:"the display text of the template"`
IsFeatured *bool `json:"isfeatured,omitempty" doc:"true if this template is a featured template, default is false"`
Name string `json:"name" doc:"the name of the template"`
PasswordEnabled *bool `json:"passwordenabled,omitempty" doc:"true if the template supports the password reset feature; default is false"`
SSHKeyEnabled *bool `json:"sshkeyenabled,omitempty" doc:"true if the template supports the sshkey upload feature; default is false"`
TemplateTag string `json:"templatetag,omitempty" doc:"the tag for this template"`
URL string `json:"url" doc:"the URL of where the template is hosted"`
ZoneID *UUID `json:"zoneid" doc:"the ID of the zone the template is to be hosted on"`
}
// Response returns the struct to unmarshal
func (RegisterCustomTemplate) Response() interface{} {
return new(AsyncJobResult)
}
// AsyncResponse returns the struct to unmarshal the async job
func (RegisterCustomTemplate) AsyncResponse() interface{} {
return new([]Template)
}
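A caller-side sketch for the RegisterCustomTemplate command defined above. The struct fields come from this hunk; client.RequestWithContext, the asserted *[]egoscale.Template job result and the sample values follow the usual egoscale async-command pattern and are assumptions.

package egoscaleexample

import (
    "context"

    "github.com/exoscale/egoscale"
)

// registerTemplate registers a custom template and returns the resulting templates.
func registerTemplate(ctx context.Context, client *egoscale.Client, zoneID *egoscale.UUID) ([]egoscale.Template, error) {
    req := &egoscale.RegisterCustomTemplate{
        Name:        "debian-10-custom",
        Displaytext: "Debian 10 with preinstalled tooling",
        URL:         "https://example.com/images/debian-10-custom.qcow2",
        Checksum:    "0123456789abcdef0123456789abcdef", // MD5 of the image
        ZoneID:      zoneID,
    }

    // RegisterCustomTemplate is asynchronous; RequestWithContext is assumed to wait for the job result.
    resp, err := client.RequestWithContext(ctx, req)
    if err != nil {
        return nil, err
    }

    templates := resp.(*[]egoscale.Template) // AsyncResponse above is new([]Template)
    return *templates, nil
}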


@@ -4,7 +4,7 @@ import (
    "encoding/json"
    "fmt"

-   uuid "github.com/satori/go.uuid"
+   uuid "github.com/gofrs/uuid"
 )

 // UUID holds a UUID v4
@@ -38,7 +38,7 @@ func (u *UUID) DeepCopyInto(out *UUID) {
 // Equal returns true if itself is equal to other.
 func (u UUID) Equal(other UUID) bool {
-   return uuid.Equal(u.UUID, other.UUID)
+   return u == other
 }

 // UnmarshalJSON unmarshals the raw JSON into the UUID.


@@ -1,4 +1,4 @@
 package egoscale

 // Version of the library
-const Version = "0.14.0"
+const Version = "0.17.1"


@@ -314,8 +314,9 @@ func (DeployVirtualMachine) AsyncResponse() interface{} {
 // StartVirtualMachine (Async) represents the creation of the virtual machine
 type StartVirtualMachine struct {
    ID *UUID `json:"id" doc:"The ID of the virtual machine"`
-   _ bool `name:"startVirtualMachine" description:"Starts a virtual machine."`
+   RescueProfile string `json:"rescueprofile,omitempty" doc:"An optional rescue profile to use when booting"`
+   _ bool `name:"startVirtualMachine" description:"Starts a virtual machine."`
 }

 // Response returns the struct to unmarshal
@@ -509,7 +510,7 @@ type ListVirtualMachines struct {
    ForVirtualNetwork *bool `json:"forvirtualnetwork,omitempty" doc:"list by network type; true if need to list vms using Virtual Network, false otherwise"`
    GroupID *UUID `json:"groupid,omitempty" doc:"the group ID"`
    ID *UUID `json:"id,omitempty" doc:"the ID of the virtual machine"`
-   IDs []string `json:"ids,omitempty" doc:"the IDs of the virtual machines, mutually exclusive with id"`
+   IDs []UUID `json:"ids,omitempty" doc:"the IDs of the virtual machines, mutually exclusive with id"`
    IPAddress net.IP `json:"ipaddress,omitempty" doc:"an IP address to filter the result"`
    IsoID *UUID `json:"isoid,omitempty" doc:"list vms by iso"`
    Keyword string `json:"keyword,omitempty" doc:"List by keyword"`


@ -1,9 +0,0 @@
// Copyright (c) 2015-2018 Jeevanandam M (jeeva@myjeeva.com), All rights reserved.
// resty source code and usage is governed by a MIT style
// license that can be found in the LICENSE file.
// Package resty provides simple HTTP and REST client for Go inspired by Ruby rest-client.
package resty
// Version # of resty
const Version = "1.8.0"

20
vendor/github.com/gofrs/uuid/LICENSE generated vendored Normal file

@ -0,0 +1,20 @@
Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

212
vendor/github.com/gofrs/uuid/codec.go generated vendored Normal file

@ -0,0 +1,212 @@
// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package uuid
import (
"bytes"
"encoding/hex"
"fmt"
)
// FromBytes returns a UUID generated from the raw byte slice input.
// It will return an error if the slice isn't 16 bytes long.
func FromBytes(input []byte) (UUID, error) {
u := UUID{}
err := u.UnmarshalBinary(input)
return u, err
}
// FromBytesOrNil returns a UUID generated from the raw byte slice input.
// Same behavior as FromBytes(), but returns uuid.Nil instead of an error.
func FromBytesOrNil(input []byte) UUID {
uuid, err := FromBytes(input)
if err != nil {
return Nil
}
return uuid
}
// FromString returns a UUID parsed from the input string.
// Input is expected in a form accepted by UnmarshalText.
func FromString(input string) (UUID, error) {
u := UUID{}
err := u.UnmarshalText([]byte(input))
return u, err
}
// FromStringOrNil returns a UUID parsed from the input string.
// Same behavior as FromString(), but returns uuid.Nil instead of an error.
func FromStringOrNil(input string) UUID {
uuid, err := FromString(input)
if err != nil {
return Nil
}
return uuid
}
// MarshalText implements the encoding.TextMarshaler interface.
// The encoding is the same as returned by the String() method.
func (u UUID) MarshalText() ([]byte, error) {
return []byte(u.String()), nil
}
// UnmarshalText implements the encoding.TextUnmarshaler interface.
// Following formats are supported:
//
// "6ba7b810-9dad-11d1-80b4-00c04fd430c8",
// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}",
// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
// "6ba7b8109dad11d180b400c04fd430c8"
// "{6ba7b8109dad11d180b400c04fd430c8}",
// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8"
//
// ABNF for supported UUID text representation follows:
//
// URN := 'urn'
// UUID-NID := 'uuid'
//
// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' |
// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' |
// 'A' | 'B' | 'C' | 'D' | 'E' | 'F'
//
// hexoct := hexdig hexdig
// 2hexoct := hexoct hexoct
// 4hexoct := 2hexoct 2hexoct
// 6hexoct := 4hexoct 2hexoct
// 12hexoct := 6hexoct 6hexoct
//
// hashlike := 12hexoct
// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct
//
// plain := canonical | hashlike
// uuid := canonical | hashlike | braced | urn
//
// braced := '{' plain '}' | '{' hashlike '}'
// urn := URN ':' UUID-NID ':' plain
//
func (u *UUID) UnmarshalText(text []byte) error {
switch len(text) {
case 32:
return u.decodeHashLike(text)
case 34, 38:
return u.decodeBraced(text)
case 36:
return u.decodeCanonical(text)
case 41, 45:
return u.decodeURN(text)
default:
return fmt.Errorf("uuid: incorrect UUID length: %s", text)
}
}
// decodeCanonical decodes UUID strings that are formatted as defined in RFC-4122 (section 3):
// "6ba7b810-9dad-11d1-80b4-00c04fd430c8".
func (u *UUID) decodeCanonical(t []byte) error {
if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' {
return fmt.Errorf("uuid: incorrect UUID format %s", t)
}
src := t
dst := u[:]
for i, byteGroup := range byteGroups {
if i > 0 {
src = src[1:] // skip dash
}
_, err := hex.Decode(dst[:byteGroup/2], src[:byteGroup])
if err != nil {
return err
}
src = src[byteGroup:]
dst = dst[byteGroup/2:]
}
return nil
}
// decodeHashLike decodes UUID strings that are using the following format:
// "6ba7b8109dad11d180b400c04fd430c8".
func (u *UUID) decodeHashLike(t []byte) error {
src := t[:]
dst := u[:]
_, err := hex.Decode(dst, src)
return err
}
// decodeBraced decodes UUID strings that are using the following formats:
// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}"
// "{6ba7b8109dad11d180b400c04fd430c8}".
func (u *UUID) decodeBraced(t []byte) error {
l := len(t)
if t[0] != '{' || t[l-1] != '}' {
return fmt.Errorf("uuid: incorrect UUID format %s", t)
}
return u.decodePlain(t[1 : l-1])
}
// decodeURN decodes UUID strings that are using the following formats:
// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8".
func (u *UUID) decodeURN(t []byte) error {
total := len(t)
urnUUIDPrefix := t[:9]
if !bytes.Equal(urnUUIDPrefix, urnPrefix) {
return fmt.Errorf("uuid: incorrect UUID format: %s", t)
}
return u.decodePlain(t[9:total])
}
// decodePlain decodes UUID strings that are using the following formats:
// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format
// "6ba7b8109dad11d180b400c04fd430c8".
func (u *UUID) decodePlain(t []byte) error {
switch len(t) {
case 32:
return u.decodeHashLike(t)
case 36:
return u.decodeCanonical(t)
default:
return fmt.Errorf("uuid: incorrect UUID length: %s", t)
}
}
// MarshalBinary implements the encoding.BinaryMarshaler interface.
func (u UUID) MarshalBinary() ([]byte, error) {
return u.Bytes(), nil
}
// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
// It will return an error if the slice isn't 16 bytes long.
func (u *UUID) UnmarshalBinary(data []byte) error {
if len(data) != Size {
return fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data))
}
copy(u[:], data)
return nil
}
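
For reference, a minimal usage sketch of the codec helpers vendored above (illustrative only, not part of the vendored sources; it uses only FromString, FromStringOrNil, Nil and String as declared in this package):

// Illustrative sketch, not part of the vendored sources.
package main

import (
	"fmt"

	uuid "github.com/gofrs/uuid"
)

func main() {
	// Canonical, braced, hash-like and URN forms are all accepted by UnmarshalText.
	u, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(u.String())

	// FromStringOrNil swallows the error and returns the zero UUID instead.
	bad := uuid.FromStringOrNil("not-a-uuid")
	fmt.Println(bad == uuid.Nil) // true
}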

47
vendor/github.com/gofrs/uuid/fuzz.go generated vendored Normal file

@ -0,0 +1,47 @@
// Copyright (c) 2018 Andrei Tudor Călin <mail@acln.ro>
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// +build gofuzz
package uuid
// Fuzz implements a simple fuzz test for FromString / UnmarshalText.
//
// To run:
//
// $ go get github.com/dvyukov/go-fuzz/...
// $ cd $GOPATH/src/github.com/gofrs/uuid
// $ go-fuzz-build github.com/gofrs/uuid
// $ go-fuzz -bin=uuid-fuzz.zip -workdir=./testdata
//
// If you make significant changes to FromString / UnmarshalText and add
// new cases to fromStringTests (in codec_test.go), please run
//
// $ go test -seed_fuzz_corpus
//
// to seed the corpus with the new interesting inputs, then run the fuzzer.
func Fuzz(data []byte) int {
_, err := FromString(string(data))
if err != nil {
return 0
}
return 1
}

299
vendor/github.com/gofrs/uuid/generator.go generated vendored Normal file

@ -0,0 +1,299 @@
// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package uuid
import (
"crypto/md5"
"crypto/rand"
"crypto/sha1"
"encoding/binary"
"fmt"
"hash"
"io"
"net"
"os"
"sync"
"time"
)
// Difference in 100-nanosecond intervals between
// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970).
const epochStart = 122192928000000000
type epochFunc func() time.Time
// HWAddrFunc is the function type used to provide hardware (MAC) addresses.
type HWAddrFunc func() (net.HardwareAddr, error)
// DefaultGenerator is the default UUID Generator used by this package.
var DefaultGenerator Generator = NewGen()
var (
posixUID = uint32(os.Getuid())
posixGID = uint32(os.Getgid())
)
// NewV1 returns a UUID based on the current timestamp and MAC address.
func NewV1() (UUID, error) {
return DefaultGenerator.NewV1()
}
// NewV2 returns a DCE Security UUID based on the POSIX UID/GID.
func NewV2(domain byte) (UUID, error) {
return DefaultGenerator.NewV2(domain)
}
// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
func NewV3(ns UUID, name string) UUID {
return DefaultGenerator.NewV3(ns, name)
}
// NewV4 returns a randomly generated UUID.
func NewV4() (UUID, error) {
return DefaultGenerator.NewV4()
}
// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name.
func NewV5(ns UUID, name string) UUID {
return DefaultGenerator.NewV5(ns, name)
}
// Generator provides an interface for generating UUIDs.
type Generator interface {
NewV1() (UUID, error)
NewV2(domain byte) (UUID, error)
NewV3(ns UUID, name string) UUID
NewV4() (UUID, error)
NewV5(ns UUID, name string) UUID
}
// Gen is a reference UUID generator based on the specifications laid out in
// RFC-4122 and DCE 1.1: Authentication and Security Services. This type
// satisfies the Generator interface as defined in this package.
//
// For consumers who are generating V1 UUIDs, but don't want to expose the MAC
// address of the node generating the UUIDs, the NewGenWithHWAF() function has been
// provided as a convenience. See the function's documentation for more info.
//
// The authors of this package do not feel that the majority of users will need
// to obfuscate their MAC address, and so we recommend using NewGen() to create
// a new generator.
type Gen struct {
clockSequenceOnce sync.Once
hardwareAddrOnce sync.Once
storageMutex sync.Mutex
rand io.Reader
epochFunc epochFunc
hwAddrFunc HWAddrFunc
lastTime uint64
clockSequence uint16
hardwareAddr [6]byte
}
// interface check -- build will fail if *Gen doesn't satisfy Generator
var _ Generator = (*Gen)(nil)
// NewGen returns a new instance of Gen with some default values set. Most
// people should use this.
func NewGen() *Gen {
return NewGenWithHWAF(defaultHWAddrFunc)
}
// NewGenWithHWAF builds a new UUID generator with the HWAddrFunc provided. Most
// consumers should use NewGen() instead.
//
// This is used so that consumers can generate their own MAC addresses, for use
// in the generated UUIDs, if there is some concern about exposing the physical
// address of the machine generating the UUID.
//
// The Gen generator will only invoke the HWAddrFunc once, and cache that MAC
// address for all the future UUIDs generated by it. If you'd like to switch the
// MAC address being used, you'll need to create a new generator using this
// function.
func NewGenWithHWAF(hwaf HWAddrFunc) *Gen {
return &Gen{
epochFunc: time.Now,
hwAddrFunc: hwaf,
rand: rand.Reader,
}
}
// NewV1 returns a UUID based on the current timestamp and MAC address.
func (g *Gen) NewV1() (UUID, error) {
u := UUID{}
timeNow, clockSeq, err := g.getClockSequence()
if err != nil {
return Nil, err
}
binary.BigEndian.PutUint32(u[0:], uint32(timeNow))
binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32))
binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48))
binary.BigEndian.PutUint16(u[8:], clockSeq)
hardwareAddr, err := g.getHardwareAddr()
if err != nil {
return Nil, err
}
copy(u[10:], hardwareAddr)
u.SetVersion(V1)
u.SetVariant(VariantRFC4122)
return u, nil
}
// NewV2 returns a DCE Security UUID based on the POSIX UID/GID.
func (g *Gen) NewV2(domain byte) (UUID, error) {
u, err := g.NewV1()
if err != nil {
return Nil, err
}
switch domain {
case DomainPerson:
binary.BigEndian.PutUint32(u[:], posixUID)
case DomainGroup:
binary.BigEndian.PutUint32(u[:], posixGID)
}
u[9] = domain
u.SetVersion(V2)
u.SetVariant(VariantRFC4122)
return u, nil
}
// NewV3 returns a UUID based on the MD5 hash of the namespace UUID and name.
func (g *Gen) NewV3(ns UUID, name string) UUID {
u := newFromHash(md5.New(), ns, name)
u.SetVersion(V3)
u.SetVariant(VariantRFC4122)
return u
}
// NewV4 returns a randomly generated UUID.
func (g *Gen) NewV4() (UUID, error) {
u := UUID{}
if _, err := io.ReadFull(g.rand, u[:]); err != nil {
return Nil, err
}
u.SetVersion(V4)
u.SetVariant(VariantRFC4122)
return u, nil
}
// NewV5 returns a UUID based on SHA-1 hash of the namespace UUID and name.
func (g *Gen) NewV5(ns UUID, name string) UUID {
u := newFromHash(sha1.New(), ns, name)
u.SetVersion(V5)
u.SetVariant(VariantRFC4122)
return u
}
// Returns the epoch and clock sequence.
func (g *Gen) getClockSequence() (uint64, uint16, error) {
var err error
g.clockSequenceOnce.Do(func() {
buf := make([]byte, 2)
if _, err = io.ReadFull(g.rand, buf); err != nil {
return
}
g.clockSequence = binary.BigEndian.Uint16(buf)
})
if err != nil {
return 0, 0, err
}
g.storageMutex.Lock()
defer g.storageMutex.Unlock()
timeNow := g.getEpoch()
// Clock didn't change since last UUID generation.
// Should increase clock sequence.
if timeNow <= g.lastTime {
g.clockSequence++
}
g.lastTime = timeNow
return timeNow, g.clockSequence, nil
}
// Returns the hardware address.
func (g *Gen) getHardwareAddr() ([]byte, error) {
var err error
g.hardwareAddrOnce.Do(func() {
var hwAddr net.HardwareAddr
if hwAddr, err = g.hwAddrFunc(); err == nil {
copy(g.hardwareAddr[:], hwAddr)
return
}
// Initialize hardwareAddr randomly in case
// of real network interfaces absence.
if _, err = io.ReadFull(g.rand, g.hardwareAddr[:]); err != nil {
return
}
// Set multicast bit as recommended by RFC-4122
g.hardwareAddr[0] |= 0x01
})
if err != nil {
return []byte{}, err
}
return g.hardwareAddr[:], nil
}
// Returns the difference between UUID epoch (October 15, 1582)
// and current time in 100-nanosecond intervals.
func (g *Gen) getEpoch() uint64 {
return epochStart + uint64(g.epochFunc().UnixNano()/100)
}
// Returns the UUID based on the hashing of the namespace UUID and name.
func newFromHash(h hash.Hash, ns UUID, name string) UUID {
u := UUID{}
h.Write(ns[:])
h.Write([]byte(name))
copy(u[:], h.Sum(nil))
return u
}
// Returns the hardware address.
func defaultHWAddrFunc() (net.HardwareAddr, error) {
ifaces, err := net.Interfaces()
if err != nil {
return []byte{}, err
}
for _, iface := range ifaces {
if len(iface.HardwareAddr) >= 6 {
return iface.HardwareAddr, nil
}
}
return []byte{}, fmt.Errorf("uuid: no HW address found")
}
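
A short, hedged sketch of the generator API above (not part of the vendored sources); NewV4 and NewV5 are the entry points most callers use, and NamespaceDNS is the predefined namespace declared in uuid.go:

// Illustrative sketch, not part of the vendored sources.
package main

import (
	"fmt"

	uuid "github.com/gofrs/uuid"
)

func main() {
	// V4 is random; it can only fail if the random source fails.
	v4, err := uuid.NewV4()
	if err != nil {
		panic(err)
	}
	fmt.Println("v4:", v4)

	// V5 is deterministic for a given namespace and name.
	v5 := uuid.NewV5(uuid.NamespaceDNS, "example.com")
	fmt.Println("v5:", v5)
}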

109
vendor/github.com/gofrs/uuid/sql.go generated vendored Normal file

@ -0,0 +1,109 @@
// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package uuid
import (
"bytes"
"database/sql/driver"
"encoding/json"
"fmt"
)
// Value implements the driver.Valuer interface.
func (u UUID) Value() (driver.Value, error) {
return u.String(), nil
}
// Scan implements the sql.Scanner interface.
// A 16-byte slice will be handled by UnmarshalBinary, while
// a longer byte slice or a string will be handled by UnmarshalText.
func (u *UUID) Scan(src interface{}) error {
switch src := src.(type) {
case UUID: // support gorm convert from UUID to NullUUID
*u = src
return nil
case []byte:
if len(src) == Size {
return u.UnmarshalBinary(src)
}
return u.UnmarshalText(src)
case string:
return u.UnmarshalText([]byte(src))
}
return fmt.Errorf("uuid: cannot convert %T to UUID", src)
}
// NullUUID can be used with the standard sql package to represent a
// UUID value that can be NULL in the database.
type NullUUID struct {
UUID UUID
Valid bool
}
// Value implements the driver.Valuer interface.
func (u NullUUID) Value() (driver.Value, error) {
if !u.Valid {
return nil, nil
}
// Delegate to UUID Value function
return u.UUID.Value()
}
// Scan implements the sql.Scanner interface.
func (u *NullUUID) Scan(src interface{}) error {
if src == nil {
u.UUID, u.Valid = Nil, false
return nil
}
// Delegate to UUID Scan function
u.Valid = true
return u.UUID.Scan(src)
}
// MarshalJSON marshals the NullUUID as null or the nested UUID
func (u NullUUID) MarshalJSON() ([]byte, error) {
if !u.Valid {
return json.Marshal(nil)
}
return json.Marshal(u.UUID)
}
// UnmarshalJSON unmarshals a NullUUID
func (u *NullUUID) UnmarshalJSON(b []byte) error {
if bytes.Equal(b, []byte("null")) {
u.UUID, u.Valid = Nil, false
return nil
}
if err := json.Unmarshal(b, &u.UUID); err != nil {
return err
}
u.Valid = true
return nil
}
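
A small illustrative sketch (not part of the vendored sources) of how NullUUID behaves with the Scan and MarshalJSON methods above:

// Illustrative sketch, not part of the vendored sources.
package main

import (
	"encoding/json"
	"fmt"

	uuid "github.com/gofrs/uuid"
)

func main() {
	var n uuid.NullUUID

	// Scanning a string, as a database driver would supply it.
	if err := n.Scan("6ba7b810-9dad-11d1-80b4-00c04fd430c8"); err != nil {
		panic(err)
	}
	fmt.Println(n.Valid, n.UUID)

	// An invalid NullUUID marshals to JSON null.
	out, _ := json.Marshal(uuid.NullUUID{})
	fmt.Println(string(out)) // null
}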

189
vendor/github.com/gofrs/uuid/uuid.go generated vendored Normal file

@ -0,0 +1,189 @@
// Copyright (C) 2013-2018 by Maxim Bublis <b@codemonkey.ru>
//
// Permission is hereby granted, free of charge, to any person obtaining
// a copy of this software and associated documentation files (the
// "Software"), to deal in the Software without restriction, including
// without limitation the rights to use, copy, modify, merge, publish,
// distribute, sublicense, and/or sell copies of the Software, and to
// permit persons to whom the Software is furnished to do so, subject to
// the following conditions:
//
// The above copyright notice and this permission notice shall be
// included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
// Package uuid provides implementations of the Universally Unique Identifier (UUID), as specified in RFC-4122 and DCE 1.1.
//
// RFC-4122[1] provides the specification for versions 1, 3, 4, and 5.
//
// DCE 1.1[2] provides the specification for version 2.
//
// [1] https://tools.ietf.org/html/rfc4122
// [2] http://pubs.opengroup.org/onlinepubs/9696989899/chap5.htm#tagcjh_08_02_01_01
package uuid
import (
"encoding/binary"
"encoding/hex"
"fmt"
"time"
)
// Size of a UUID in bytes.
const Size = 16
// UUID is an array type to represent the value of a UUID, as defined in RFC-4122.
type UUID [Size]byte
// UUID versions.
const (
_ byte = iota
V1 // Version 1 (date-time and MAC address)
V2 // Version 2 (date-time and MAC address, DCE security version)
V3 // Version 3 (namespace name-based)
V4 // Version 4 (random)
V5 // Version 5 (namespace name-based)
)
// UUID layout variants.
const (
VariantNCS byte = iota
VariantRFC4122
VariantMicrosoft
VariantFuture
)
// UUID DCE domains.
const (
DomainPerson = iota
DomainGroup
DomainOrg
)
// Timestamp is the count of 100-nanosecond intervals since 00:00:00.00,
// 15 October 1582 within a V1 UUID. This type has no meaning for V2-V5
// UUIDs since they don't have an embedded timestamp.
type Timestamp uint64
const _100nsPerSecond = 10000000
// Time returns the UTC time.Time representation of a Timestamp
func (t Timestamp) Time() (time.Time, error) {
secs := uint64(t) / _100nsPerSecond
nsecs := 100 * (uint64(t) % _100nsPerSecond)
return time.Unix(int64(secs)-(epochStart/_100nsPerSecond), int64(nsecs)), nil
}
// TimestampFromV1 returns the Timestamp embedded within a V1 UUID.
// Returns an error if the UUID is any version other than 1.
func TimestampFromV1(u UUID) (Timestamp, error) {
if u.Version() != 1 {
err := fmt.Errorf("uuid: %s is version %d, not version 1", u, u.Version())
return 0, err
}
low := binary.BigEndian.Uint32(u[0:4])
mid := binary.BigEndian.Uint16(u[4:6])
hi := binary.BigEndian.Uint16(u[6:8]) & 0xfff
return Timestamp(uint64(low) + (uint64(mid) << 32) + (uint64(hi) << 48)), nil
}
// String parse helpers.
var (
urnPrefix = []byte("urn:uuid:")
byteGroups = []int{8, 4, 4, 4, 12}
)
// Nil is the nil UUID, as specified in RFC-4122, that has all 128 bits set to
// zero.
var Nil = UUID{}
// Predefined namespace UUIDs.
var (
NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
)
// Version returns the algorithm version used to generate the UUID.
func (u UUID) Version() byte {
return u[6] >> 4
}
// Variant returns the UUID layout variant.
func (u UUID) Variant() byte {
switch {
case (u[8] >> 7) == 0x00:
return VariantNCS
case (u[8] >> 6) == 0x02:
return VariantRFC4122
case (u[8] >> 5) == 0x06:
return VariantMicrosoft
case (u[8] >> 5) == 0x07:
fallthrough
default:
return VariantFuture
}
}
// Bytes returns a byte slice representation of the UUID.
func (u UUID) Bytes() []byte {
return u[:]
}
// String returns a canonical RFC-4122 string representation of the UUID:
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx.
func (u UUID) String() string {
buf := make([]byte, 36)
hex.Encode(buf[0:8], u[0:4])
buf[8] = '-'
hex.Encode(buf[9:13], u[4:6])
buf[13] = '-'
hex.Encode(buf[14:18], u[6:8])
buf[18] = '-'
hex.Encode(buf[19:23], u[8:10])
buf[23] = '-'
hex.Encode(buf[24:], u[10:])
return string(buf)
}
// SetVersion sets the version bits.
func (u *UUID) SetVersion(v byte) {
u[6] = (u[6] & 0x0f) | (v << 4)
}
// SetVariant sets the variant bits.
func (u *UUID) SetVariant(v byte) {
switch v {
case VariantNCS:
u[8] = (u[8]&(0xff>>1) | (0x00 << 7))
case VariantRFC4122:
u[8] = (u[8]&(0xff>>2) | (0x02 << 6))
case VariantMicrosoft:
u[8] = (u[8]&(0xff>>3) | (0x06 << 5))
case VariantFuture:
fallthrough
default:
u[8] = (u[8]&(0xff>>3) | (0x07 << 5))
}
}
// Must is a helper that wraps a call to a function returning (UUID, error)
// and panics if the error is non-nil. It is intended for use in variable
// initializations such as
// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"))
func Must(u UUID, err error) UUID {
if err != nil {
panic(err)
}
return u
}
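
A brief sketch, for illustration only, of Must for package-level initialization together with the Version and Variant accessors above:

// Illustrative sketch, not part of the vendored sources.
package main

import (
	"fmt"

	uuid "github.com/gofrs/uuid"
)

var wellKnown = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000"))

func main() {
	fmt.Println(wellKnown.Version(), wellKnown.Variant() == uuid.VariantRFC4122)
}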


@@ -8,14 +8,15 @@ type Account struct {
	LastName   string      `json:"last_name"`
	Email      string      `json:"email"`
	Company    string      `json:"company"`
-	Address1   string      `json:"address1"`
-	Address2   string      `json:"address2"`
+	Address1   string      `json:"address_1"`
+	Address2   string      `json:"address_2"`
	Balance    float32     `json:"balance"`
	City       string      `json:"city"`
	State      string      `json:"state"`
	Zip        string      `json:"zip"`
	Country    string      `json:"country"`
	TaxID      string      `json:"tax_id"`
+	Phone      string      `json:"phone"`
	CreditCard *CreditCard `json:"credit_card"`
}


@@ -10,13 +10,14 @@ type Notification struct {
	UntilStr string `json:"until"`
	WhenStr  string `json:"when"`
	Label    string `json:"label"`
+	Body     *string              `json:"body"`
	Message  string               `json:"message"`
-	Type     string               `json:"type"`
-	Severity string               `json:"severity"`
+	Type     NotificationType     `json:"type"`
+	Severity NotificationSeverity `json:"severity"`
	Entity   *NotificationEntity  `json:"entity"`
	Until    *time.Time           `json:"-"`
	When     *time.Time           `json:"-"`
}

// NotificationEntity adds detailed information about the Notification.
@@ -28,6 +29,33 @@ type NotificationEntity struct {
	URL string `json:"url"`
}
// NotificationSeverity constants start with Notification and include all known Linode API Notification Severities.
type NotificationSeverity string
// NotificationSeverity constants represent the actions that cause a Notification. New severities may be added in the future.
const (
NotificationMinor NotificationSeverity = "minor"
NotificationMajor NotificationSeverity = "major"
NotificationCritical NotificationSeverity = "critical"
)
// NotificationType constants start with Notification and include all known Linode API Notification Types.
type NotificationType string
// NotificationType constants represent the actions that cause a Notification. New types may be added in the future.
const (
NotificationMigrationScheduled NotificationType = "migration_scheduled"
NotificationMigrationImminent NotificationType = "migration_imminent"
NotificationMigrationPending NotificationType = "migration_pending"
NotificationRebootScheduled NotificationType = "reboot_scheduled"
NotificationOutage NotificationType = "outage"
NotificationPaymentDue NotificationType = "payment_due"
NotificationTicketImportant NotificationType = "ticket_important"
NotificationTicketAbuse NotificationType = "ticket_abuse"
NotificationNotice NotificationType = "notice"
NotificationMaintenance NotificationType = "maintenance"
)
// NotificationsPagedResponse represents a paginated Notifications API response
type NotificationsPagedResponse struct {
	*PageOptions

164
vendor/github.com/linode/linodego/account_users.go generated vendored Normal file

@ -0,0 +1,164 @@
package linodego
import (
"context"
"encoding/json"
"fmt"
)
// User represents a User object
type User struct {
Username string `json:"username"`
Email string `json:"email"`
Restricted bool `json:"restricted"`
SSHKeys []string `json:"ssh_keys"`
}
// UserCreateOptions fields are those accepted by CreateUser
type UserCreateOptions struct {
Username string `json:"username"`
Email string `json:"email"`
Restricted bool `json:"restricted,omitempty"`
}
// UserUpdateOptions fields are those accepted by UpdateUser
type UserUpdateOptions struct {
Username string `json:"username,omitempty"`
Email string `json:"email,omitempty"`
Restricted *bool `json:"restricted,omitempty"`
SSHKeys *[]string `json:"ssh_keys,omitempty"`
}
// GetCreateOptions converts a User to UserCreateOptions for use in CreateUser
func (i User) GetCreateOptions() (o UserCreateOptions) {
o.Username = i.Username
o.Email = i.Email
o.Restricted = i.Restricted
return
}
// GetUpdateOptions converts a User to UserUpdateOptions for use in UpdateUser
func (i User) GetUpdateOptions() (o UserUpdateOptions) {
o.Username = i.Username
o.Email = i.Email
o.Restricted = copyBool(&i.Restricted)
return
}
// UsersPagedResponse represents a paginated User API response
type UsersPagedResponse struct {
*PageOptions
Data []User `json:"data"`
}
// endpoint gets the endpoint URL for User
func (UsersPagedResponse) endpoint(c *Client) string {
endpoint, err := c.Users.Endpoint()
if err != nil {
panic(err)
}
return endpoint
}
// appendData appends Users when processing paginated User responses
func (resp *UsersPagedResponse) appendData(r *UsersPagedResponse) {
resp.Data = append(resp.Data, r.Data...)
}
// ListUsers lists Users on the account
func (c *Client) ListUsers(ctx context.Context, opts *ListOptions) ([]User, error) {
response := UsersPagedResponse{}
err := c.listHelper(ctx, &response, opts)
for i := range response.Data {
response.Data[i].fixDates()
}
if err != nil {
return nil, err
}
return response.Data, nil
}
// fixDates converts JSON timestamps to Go time.Time values
func (i *User) fixDates() *User {
return i
}
// GetUser gets the user with the provided ID
func (c *Client) GetUser(ctx context.Context, id string) (*User, error) {
e, err := c.Users.Endpoint()
if err != nil {
return nil, err
}
e = fmt.Sprintf("%s/%s", e, id)
r, err := coupleAPIErrors(c.R(ctx).SetResult(&User{}).Get(e))
if err != nil {
return nil, err
}
return r.Result().(*User).fixDates(), nil
}
// CreateUser creates a User. The email address must be confirmed before the
// User account can be accessed.
func (c *Client) CreateUser(ctx context.Context, createOpts UserCreateOptions) (*User, error) {
var body string
e, err := c.Users.Endpoint()
if err != nil {
return nil, err
}
req := c.R(ctx).SetResult(&User{})
if bodyData, err := json.Marshal(createOpts); err == nil {
body = string(bodyData)
} else {
return nil, NewError(err)
}
r, err := coupleAPIErrors(req.
SetBody(body).
Post(e))
if err != nil {
return nil, err
}
return r.Result().(*User).fixDates(), nil
}
// UpdateUser updates the User with the specified id
func (c *Client) UpdateUser(ctx context.Context, id string, updateOpts UserUpdateOptions) (*User, error) {
var body string
e, err := c.Users.Endpoint()
if err != nil {
return nil, err
}
e = fmt.Sprintf("%s/%s", e, id)
req := c.R(ctx).SetResult(&User{})
if bodyData, err := json.Marshal(updateOpts); err == nil {
body = string(bodyData)
} else {
return nil, NewError(err)
}
r, err := coupleAPIErrors(req.
SetBody(body).
Put(e))
if err != nil {
return nil, err
}
return r.Result().(*User).fixDates(), nil
}
// DeleteUser deletes the User with the specified id
func (c *Client) DeleteUser(ctx context.Context, id string) error {
e, err := c.Users.Endpoint()
if err != nil {
return err
}
e = fmt.Sprintf("%s/%s", e, id)
_, err = coupleAPIErrors(c.R(ctx).Delete(e))
return err
}
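
A hedged usage sketch of the new Users methods (not part of this commit): it assumes an authenticated client, which this diff does not show how to construct; only NewClient, ListUsers, CreateUser and the option structs above are taken from the vendored code.

// Illustrative sketch, not part of the vendored sources. Token/authentication
// setup is assumed and omitted here.
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/linode/linodego"
)

func main() {
	client := linodego.NewClient(&http.Client{}) // auth setup omitted (assumption)

	users, err := client.ListUsers(context.Background(), nil)
	if err != nil {
		fmt.Println("list users:", err)
		return
	}
	for _, u := range users {
		fmt.Println(u.Username, u.Email, u.Restricted)
	}

	_, err = client.CreateUser(context.Background(), linodego.UserCreateOptions{
		Username:   "example-user",
		Email:      "user@example.com",
		Restricted: true,
	})
	if err != nil {
		fmt.Println("create user:", err)
	}
}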


@@ -7,8 +7,9 @@ import (
	"net/http"
	"os"
	"strconv"
+	"time"

-	"github.com/go-resty/resty"
+	"gopkg.in/resty.v1"
)

const (
@@ -19,17 +20,18 @@ const (
	// APIProto connect to API with http(s)
	APIProto = "https"
	// Version of linodego
-	Version = "0.5.1"
+	Version = "0.7.0"
	// APIEnvVar environment var to check for API token
	APIEnvVar = "LINODE_TOKEN"
-	// APISecondsPerPoll how frequently to poll for new Events
-	APISecondsPerPoll = 10
+	// APISecondsPerPoll how frequently to poll for new Events or Status in WaitFor functions
+	APISecondsPerPoll = 3
+	// DefaultUserAgent is the default User-Agent sent in HTTP request headers
+	DefaultUserAgent = "linodego " + Version + " https://github.com/linode/linodego"
)

-// DefaultUserAgent is the default User-Agent sent in HTTP request headers
-const DefaultUserAgent = "linodego " + Version + " https://github.com/linode/linodego"
-
-var envDebug = false
+var (
+	envDebug = false
+)

// Client is a wrapper around the Resty client
type Client struct {
@@ -38,6 +40,8 @@ type Client struct {
	resources map[string]*Resource

	debug bool
+	millisecondsPerPoll time.Duration

	Images          *Resource
	InstanceDisks   *Resource
	InstanceConfigs *Resource
@@ -63,6 +67,8 @@ type Client struct {
	NodeBalancerNodes *Resource
	SSHKeys           *Resource
	Tickets           *Resource
+	Tokens            *Resource
+	Token             *Resource
	Account           *Resource
	Invoices          *Resource
	InvoiceItems      *Resource
@@ -70,6 +76,8 @@ type Client struct {
	Notifications *Resource
	Profile       *Resource
	Managed       *Resource
+	Tags          *Resource
+	Users         *Resource
}

func init() {
@@ -115,6 +123,13 @@ func (c *Client) SetBaseURL(url string) *Client {
	return c
}

+// SetPollDelay sets the number of milliseconds to wait between events or status polls.
+// Affects all WaitFor* functions.
+func (c *Client) SetPollDelay(delay time.Duration) *Client {
+	c.millisecondsPerPoll = delay
+	return c
+}
+
// Resource looks up a resource by name
func (c Client) Resource(resourceName string) *Resource {
	selectedResource, ok := c.resources[resourceName]
@@ -130,6 +145,7 @@ func NewClient(hc *http.Client) (client Client) {
	client.resty = restyClient
	client.SetUserAgent(DefaultUserAgent)
	client.SetBaseURL(fmt.Sprintf("%s://%s/%s", APIProto, APIHost, APIVersion))
+	client.SetPollDelay(1000 * APISecondsPerPoll)

	resources := map[string]*Resource{
		stackscriptsName: NewResource(&client, stackscriptsName, stackscriptsEndpoint, false, Stackscript{}, StackscriptsPagedResponse{}),
@@ -155,14 +171,18 @@ func NewClient(hc *http.Client) (client Client) {
		nodebalancersName:       NewResource(&client, nodebalancersName, nodebalancersEndpoint, false, NodeBalancer{}, NodeBalancerConfigsPagedResponse{}),
		nodebalancerconfigsName: NewResource(&client, nodebalancerconfigsName, nodebalancerconfigsEndpoint, true, NodeBalancerConfig{}, NodeBalancerConfigsPagedResponse{}),
		nodebalancernodesName:   NewResource(&client, nodebalancernodesName, nodebalancernodesEndpoint, true, NodeBalancerNode{}, NodeBalancerNodesPagedResponse{}),
+		notificationsName:       NewResource(&client, notificationsName, notificationsEndpoint, false, Notification{}, NotificationsPagedResponse{}),
		sshkeysName:             NewResource(&client, sshkeysName, sshkeysEndpoint, false, SSHKey{}, SSHKeysPagedResponse{}),
		ticketsName:             NewResource(&client, ticketsName, ticketsEndpoint, false, Ticket{}, TicketsPagedResponse{}),
+		tokensName:              NewResource(&client, tokensName, tokensEndpoint, false, Token{}, TokensPagedResponse{}),
		accountName:             NewResource(&client, accountName, accountEndpoint, false, Account{}, nil), // really?
		eventsName:              NewResource(&client, eventsName, eventsEndpoint, false, Event{}, EventsPagedResponse{}),
		invoicesName:            NewResource(&client, invoicesName, invoicesEndpoint, false, Invoice{}, InvoicesPagedResponse{}),
		invoiceItemsName:        NewResource(&client, invoiceItemsName, invoiceItemsEndpoint, true, InvoiceItem{}, InvoiceItemsPagedResponse{}),
		profileName:             NewResource(&client, profileName, profileEndpoint, false, nil, nil), // really?
		managedName:             NewResource(&client, managedName, managedEndpoint, false, nil, nil), // really?
+		tagsName:                NewResource(&client, tagsName, tagsEndpoint, false, Tag{}, TagsPagedResponse{}),
+		usersName:               NewResource(&client, usersName, usersEndpoint, false, User{}, UsersPagedResponse{}),
	}

	client.resources = resources
@@ -190,12 +210,48 @@ func NewClient(hc *http.Client) (client Client) {
	client.NodeBalancers = resources[nodebalancersName]
	client.NodeBalancerConfigs = resources[nodebalancerconfigsName]
	client.NodeBalancerNodes = resources[nodebalancernodesName]
+	client.Notifications = resources[notificationsName]
	client.SSHKeys = resources[sshkeysName]
	client.Tickets = resources[ticketsName]
+	client.Tokens = resources[tokensName]
	client.Account = resources[accountName]
	client.Events = resources[eventsName]
	client.Invoices = resources[invoicesName]
	client.Profile = resources[profileName]
	client.Managed = resources[managedName]
+	client.Tags = resources[tagsName]
+	client.Users = resources[usersName]

	return
}
func copyBool(bPtr *bool) *bool {
if bPtr == nil {
return nil
}
var t = *bPtr
return &t
}
func copyInt(iPtr *int) *int {
if iPtr == nil {
return nil
}
var t = *iPtr
return &t
}
func copyString(sPtr *string) *string {
if sPtr == nil {
return nil
}
var t = *sPtr
return &t
}
func copyTime(tPtr *time.Time) *time.Time {
if tPtr == nil {
return nil
}
var t = *tPtr
return &t
}


@@ -80,22 +80,6 @@ func (d DomainRecord) GetUpdateOptions() (du DomainRecordUpdateOptions) {
	return
}

-func copyInt(iPtr *int) *int {
-	if iPtr == nil {
-		return nil
-	}
-	var t = *iPtr
-	return &t
-}
-
-func copyString(sPtr *string) *string {
-	if sPtr == nil {
-		return nil
-	}
-	var t = *sPtr
-	return &t
-}
-
// DomainRecordsPagedResponse represents a paginated DomainRecord API response
type DomainRecordsPagedResponse struct {
	*PageOptions


@@ -39,6 +39,9 @@ type Domain struct {
	// The list of IPs that may perform a zone transfer for this Domain. This is potentially dangerous, and should be set to an empty list unless you intend to use it.
	AXfrIPs []string `json:"axfr_ips"`

+	// An array of tags applied to this object. Tags are for organizational purposes only.
+	Tags []string `json:"tags"`
+
	// The amount of time in seconds that may pass before this Domain is no longer authoritative. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
	ExpireSec int `json:"expire_sec"`
@@ -81,6 +84,9 @@ type DomainCreateOptions struct {
	// The list of IPs that may perform a zone transfer for this Domain. This is potentially dangerous, and should be set to an empty list unless you intend to use it.
	AXfrIPs []string `json:"axfr_ips,omitempty"`

+	// An array of tags applied to this object. Tags are for organizational purposes only.
+	Tags []string `json:"tags"`
+
	// The amount of time in seconds that may pass before this Domain is no longer authoritative. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
	ExpireSec int `json:"expire_sec,omitempty"`
@@ -123,6 +129,9 @@ type DomainUpdateOptions struct {
	// The list of IPs that may perform a zone transfer for this Domain. This is potentially dangerous, and should be set to an empty list unless you intend to use it.
	AXfrIPs []string `json:"axfr_ips,omitempty"`

+	// An array of tags applied to this object. Tags are for organizational purposes only.
+	Tags []string `json:"tags"`
+
	// The amount of time in seconds that may pass before this Domain is no longer authoritative. Valid values are 300, 3600, 7200, 14400, 28800, 57600, 86400, 172800, 345600, 604800, 1209600, and 2419200 - any other value will be rounded to the nearest valid value.
	ExpireSec int `json:"expire_sec,omitempty"`
@@ -164,6 +173,7 @@ func (d Domain) GetUpdateOptions() (du DomainUpdateOptions) {
	du.RetrySec = d.RetrySec
	du.MasterIPs = d.MasterIPs
	du.AXfrIPs = d.AXfrIPs
+	du.Tags = d.Tags
	du.ExpireSec = d.ExpireSec
	du.RefreshSec = d.RefreshSec
	du.TTLSec = d.TTLSec


@@ -6,7 +6,7 @@ import (
	"net/http"
	"strings"

-	"github.com/go-resty/resty"
+	"gopkg.in/resty.v1"
)

const (


@@ -10,7 +10,7 @@ import (
// Image represents a deployable Image object for use with Linode Instances
type Image struct {
	CreatedStr string `json:"created"`
-	UpdatedStr string `json:"updated"`
+	ExpiryStr  string `json:"expiry"`
	ID         string `json:"id"`
	CreatedBy  string `json:"created_by"`
	Label      string `json:"label"`
@@ -22,7 +22,7 @@ type Image struct {
	Deprecated bool `json:"deprecated"`

	Created *time.Time `json:"-"`
-	Updated *time.Time `json:"-"`
+	Expiry  *time.Time `json:"-"`
}

// ImageCreateOptions fields are those accepted by CreateImage
@@ -40,7 +40,12 @@ type ImageUpdateOptions struct {
func (i *Image) fixDates() *Image {
	i.Created, _ = parseDates(i.CreatedStr)
-	i.Updated, _ = parseDates(i.UpdatedStr)
+	if len(i.ExpiryStr) > 0 {
+		i.Expiry, _ = parseDates(i.ExpiryStr)
+	} else {
+		i.Expiry = nil
+	}
	return i
}
@@ -94,7 +99,7 @@ func (c *Client) GetImage(ctx context.Context, id string) (*Image, error) {
	if err != nil {
		return nil, err
	}
-	return r.Result().(*Image), nil
+	return r.Result().(*Image).fixDates(), nil
}

// CreateImage creates a Image


@@ -14,7 +14,7 @@ type InstanceDisk struct {
	ID         int            `json:"id"`
	Label      string         `json:"label"`
-	Status     string         `json:"status"`
+	Status     DiskStatus     `json:"status"`
	Size       int            `json:"size"`
	Filesystem DiskFilesystem `json:"filesystem"`
	Created    time.Time      `json:"-"`
@@ -33,6 +33,16 @@ const (
	FilesystemInitrd DiskFilesystem = "initrd"
)

+// DiskStatus constants have the prefix "Disk" and include Linode API Instance Disk Status
+type DiskStatus string
+
+// DiskStatus constants represent the status values an Instance Disk may have
+const (
+	DiskReady    DiskStatus = "ready"
+	DiskNotReady DiskStatus = "not ready"
+	DiskDeleting DiskStatus = "deleting"
+)
+
// InstanceDisksPagedResponse represents a paginated InstanceDisk API response
type InstanceDisksPagedResponse struct {
	*PageOptions
@@ -175,13 +185,13 @@ func (c *Client) RenameInstanceDisk(ctx context.Context, linodeID int, diskID in
}

// ResizeInstanceDisk resizes the size of the Instance disk
-func (c *Client) ResizeInstanceDisk(ctx context.Context, linodeID int, diskID int, size int) (*InstanceDisk, error) {
+func (c *Client) ResizeInstanceDisk(ctx context.Context, linodeID int, diskID int, size int) error {
	var body string
	e, err := c.InstanceDisks.endpointWithID(linodeID)
	if err != nil {
-		return nil, err
+		return err
	}
-	e = fmt.Sprintf("%s/%d", e, diskID)
+	e = fmt.Sprintf("%s/%d/resize", e, diskID)

	req := c.R(ctx).SetResult(&InstanceDisk{})
	updateOpts := map[string]interface{}{
@@ -191,17 +201,41 @@ func (c *Client) ResizeInstanceDisk(ctx context.Context, linodeID int, diskID in
	if bodyData, err := json.Marshal(updateOpts); err == nil {
		body = string(bodyData)
	} else {
-		return nil, NewError(err)
+		return NewError(err)
	}

-	r, err := coupleAPIErrors(req.
+	_, err = coupleAPIErrors(req.
		SetBody(body).
		Post(e))
+	return err
+}
+
+// PasswordResetInstanceDisk resets the "root" account password on the Instance disk
+func (c *Client) PasswordResetInstanceDisk(ctx context.Context, linodeID int, diskID int, password string) error {
+	var body string
+	e, err := c.InstanceDisks.endpointWithID(linodeID)
	if err != nil {
-		return nil, err
+		return err
	}
-	return r.Result().(*InstanceDisk).fixDates(), nil
+	e = fmt.Sprintf("%s/%d/password", e, diskID)
+	req := c.R(ctx).SetResult(&InstanceDisk{})
+	updateOpts := map[string]interface{}{
+		"password": password,
+	}
+	if bodyData, err := json.Marshal(updateOpts); err == nil {
+		body = string(bodyData)
+	} else {
+		return NewError(err)
+	}
+	_, err = coupleAPIErrors(req.
+		SetBody(body).
+		Post(e))
+	return err
}

// DeleteInstanceDisk deletes a Linode Instance Disk


@@ -36,21 +36,23 @@ type Instance struct {
	CreatedStr string `json:"created"`
	UpdatedStr string `json:"updated"`
	ID         int    `json:"id"`
	Created    *time.Time      `json:"-"`
	Updated    *time.Time      `json:"-"`
	Region     string          `json:"region"`
	Alerts     *InstanceAlert  `json:"alerts"`
	Backups    *InstanceBackup `json:"backups"`
	Image      string          `json:"image"`
	Group      string          `json:"group"`
	IPv4       []*net.IP       `json:"ipv4"`
	IPv6       string          `json:"ipv6"`
	Label      string          `json:"label"`
	Type       string          `json:"type"`
	Status     InstanceStatus  `json:"status"`
	Hypervisor string          `json:"hypervisor"`
	Specs      *InstanceSpec   `json:"specs"`
+	WatchdogEnabled bool       `json:"watchdog_enabled"`
+	Tags       []string        `json:"tags"`
}

// InstanceSpec represents a linode spec
@@ -94,6 +96,7 @@ type InstanceCreateOptions struct {
	Image          string   `json:"image,omitempty"`
	BackupsEnabled bool     `json:"backups_enabled,omitempty"`
	PrivateIP      bool     `json:"private_ip,omitempty"`
+	Tags           []string `json:"tags,omitempty"`

	// Creation fields that need to be set explicitly false, "", or 0 use pointers
	SwapSize *int `json:"swap_size,omitempty"`
@@ -107,6 +110,19 @@ type InstanceUpdateOptions struct {
	Backups         *InstanceBackup `json:"backups,omitempty"`
	Alerts          *InstanceAlert  `json:"alerts,omitempty"`
	WatchdogEnabled *bool           `json:"watchdog_enabled,omitempty"`
+	Tags            *[]string       `json:"tags,omitempty"`
+}
+
+// GetUpdateOptions converts an Instance to InstanceUpdateOptions for use in UpdateInstance
+func (l *Instance) GetUpdateOptions() InstanceUpdateOptions {
+	return InstanceUpdateOptions{
+		Label:           l.Label,
+		Group:           l.Group,
+		Backups:         l.Backups,
+		Alerts:          l.Alerts,
+		WatchdogEnabled: &l.WatchdogEnabled,
+		Tags:            &l.Tags,
+	}
}

// InstanceCloneOptions is an options struct sent when Cloning an Instance


@@ -2,6 +2,7 @@ package linodego
import (
	"context"
+	"encoding/json"
	"fmt"
)

@@ -11,6 +12,18 @@ type IPAddressesPagedResponse struct {
	Data []InstanceIP `json:"data"`
}

+// IPAddressUpdateOptions fields are those accepted by UpdateToken
+type IPAddressUpdateOptions struct {
+	// The reverse DNS assigned to this address. For public IPv4 addresses, this will be set to a default value provided by Linode if set to nil.
+	RDNS *string `json:"rdns"`
+}
+
+// GetUpdateOptions converts a IPAddress to IPAddressUpdateOptions for use in UpdateIPAddress
+func (i InstanceIP) GetUpdateOptions() (o IPAddressUpdateOptions) {
+	o.RDNS = copyString(&i.RDNS)
+	return
+}
+
// endpoint gets the endpoint URL for IPAddress
func (IPAddressesPagedResponse) endpoint(c *Client) string {
	endpoint, err := c.IPAddresses.Endpoint()
@@ -48,3 +61,30 @@ func (c *Client) GetIPAddress(ctx context.Context, id string) (*InstanceIP, erro
	}
	return r.Result().(*InstanceIP), nil
}
// UpdateIPAddress updates the IPAddress with the specified id
func (c *Client) UpdateIPAddress(ctx context.Context, id string, updateOpts IPAddressUpdateOptions) (*InstanceIP, error) {
var body string
e, err := c.IPAddresses.Endpoint()
if err != nil {
return nil, err
}
e = fmt.Sprintf("%s/%s", e, id)
req := c.R(ctx).SetResult(&InstanceIP{})
if bodyData, err := json.Marshal(updateOpts); err == nil {
body = string(bodyData)
} else {
return nil, NewError(err)
}
r, err := coupleAPIErrors(req.
SetBody(body).
Put(e))
if err != nil {
return nil, err
}
return r.Result().(*InstanceIP), nil
}
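
A hedged sketch of the new UpdateIPAddress method above (not part of this commit); as in the previous example, client authentication is assumed and omitted, and only NewClient, UpdateIPAddress and IPAddressUpdateOptions are taken from the vendored code.

// Illustrative sketch, not part of the vendored sources.
package main

import (
	"context"
	"fmt"
	"net/http"

	"github.com/linode/linodego"
)

func main() {
	client := linodego.NewClient(&http.Client{}) // auth setup omitted (assumption)

	rdns := "server1.example.com"
	ip, err := client.UpdateIPAddress(context.Background(), "203.0.113.10",
		linodego.IPAddressUpdateOptions{RDNS: &rdns})
	if err != nil {
		fmt.Println("update rdns:", err)
		return
	}
	fmt.Println(ip.RDNS)
}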


@@ -28,6 +28,9 @@ type NodeBalancer struct {
	// Information about the amount of transfer this NodeBalancer has had so far this month.
	Transfer NodeBalancerTransfer `json:"transfer"`

+	// An array of tags applied to this object. Tags are for organizational purposes only.
+	Tags []string `json:"tags"`
+
	Created *time.Time `json:"-"`
	Updated *time.Time `json:"-"`
}
@@ -48,12 +51,14 @@ type NodeBalancerCreateOptions struct {
	Region             string                             `json:"region,omitempty"`
	ClientConnThrottle *int                               `json:"client_conn_throttle,omitempty"`
	Configs            []*NodeBalancerConfigCreateOptions `json:"configs,omitempty"`
+	Tags               []string                           `json:"tags"`
}

// NodeBalancerUpdateOptions are the options permitted for UpdateNodeBalancer
type NodeBalancerUpdateOptions struct {
	Label              *string   `json:"label,omitempty"`
	ClientConnThrottle *int      `json:"client_conn_throttle,omitempty"`
+	Tags               *[]string `json:"tags,omitempty"`
}

// GetCreateOptions converts a NodeBalancer to NodeBalancerCreateOptions for use in CreateNodeBalancer
@@ -62,6 +67,7 @@ func (i NodeBalancer) GetCreateOptions() NodeBalancerCreateOptions {
		Label:              i.Label,
		Region:             i.Region,
		ClientConnThrottle: &i.ClientConnThrottle,
+		Tags:               i.Tags,
	}
}
@@ -70,6 +76,7 @@ func (i NodeBalancer) GetUpdateOptions() NodeBalancerUpdateOptions {
	return NodeBalancerUpdateOptions{
		Label:              i.Label,
		ClientConnThrottle: &i.ClientConnThrottle,
+		Tags:               &i.Tags,
	}
}


@ -139,7 +139,7 @@ func (i NodeBalancerConfig) GetCreateOptions() NodeBalancerConfigCreateOptions {
CheckTimeout: i.CheckTimeout, CheckTimeout: i.CheckTimeout,
CheckPath: i.CheckPath, CheckPath: i.CheckPath,
CheckBody: i.CheckBody, CheckBody: i.CheckBody,
CheckPassive: &i.CheckPassive, CheckPassive: copyBool(&i.CheckPassive),
CipherSuite: i.CipherSuite, CipherSuite: i.CipherSuite,
SSLCert: i.SSLCert, SSLCert: i.SSLCert,
SSLKey: i.SSLKey, SSLKey: i.SSLKey,
@ -158,7 +158,7 @@ func (i NodeBalancerConfig) GetUpdateOptions() NodeBalancerConfigUpdateOptions {
CheckAttempts: i.CheckAttempts, CheckAttempts: i.CheckAttempts,
CheckPath: i.CheckPath, CheckPath: i.CheckPath,
CheckBody: i.CheckBody, CheckBody: i.CheckBody,
CheckPassive: &i.CheckPassive, CheckPassive: copyBool(&i.CheckPassive),
CheckTimeout: i.CheckTimeout, CheckTimeout: i.CheckTimeout,
CipherSuite: i.CipherSuite, CipherSuite: i.CipherSuite,
SSLCert: i.SSLCert, SSLCert: i.SSLCert,
@ -179,7 +179,7 @@ func (i NodeBalancerConfig) GetRebuildOptions() NodeBalancerConfigRebuildOptions
CheckTimeout: i.CheckTimeout, CheckTimeout: i.CheckTimeout,
CheckPath: i.CheckPath, CheckPath: i.CheckPath,
CheckBody: i.CheckBody, CheckBody: i.CheckBody,
CheckPassive: &i.CheckPassive, CheckPassive: copyBool(&i.CheckPassive),
CipherSuite: i.CipherSuite, CipherSuite: i.CipherSuite,
SSLCert: i.SSLCert, SSLCert: i.SSLCert,
SSLKey: i.SSLKey, SSLKey: i.SSLKey,


@ -10,7 +10,7 @@ import (
"log" "log"
"strconv" "strconv"
"github.com/go-resty/resty" "gopkg.in/resty.v1"
) )
// PageOptions are the pagination parameters for List endpoints // PageOptions are the pagination parameters for List endpoints
@ -179,95 +179,34 @@ func (c *Client) listHelper(ctx context.Context, i interface{}, opts *ListOption
results = r.Result().(*NodeBalancersPagedResponse).Results results = r.Result().(*NodeBalancersPagedResponse).Results
v.appendData(r.Result().(*NodeBalancersPagedResponse)) v.appendData(r.Result().(*NodeBalancersPagedResponse))
} }
case *TagsPagedResponse:
if r, err = coupleAPIErrors(req.SetResult(TagsPagedResponse{}).Get(v.endpoint(c))); err == nil {
pages = r.Result().(*TagsPagedResponse).Pages
results = r.Result().(*TagsPagedResponse).Results
v.appendData(r.Result().(*TagsPagedResponse))
}
case *TokensPagedResponse:
if r, err = coupleAPIErrors(req.SetResult(TokensPagedResponse{}).Get(v.endpoint(c))); err == nil {
pages = r.Result().(*TokensPagedResponse).Pages
results = r.Result().(*TokensPagedResponse).Results
v.appendData(r.Result().(*TokensPagedResponse))
}
case *UsersPagedResponse:
if r, err = coupleAPIErrors(req.SetResult(UsersPagedResponse{}).Get(v.endpoint(c))); err == nil {
pages = r.Result().(*UsersPagedResponse).Pages
results = r.Result().(*UsersPagedResponse).Results
v.appendData(r.Result().(*UsersPagedResponse))
}
/** /**
case AccountOauthClientsPagedResponse: case AccountOauthClientsPagedResponse:
if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
return NewError(r)
} else if err == nil {
pages = r.Result().(*AccountOauthClientsPagedResponse).Pages
results = r.Result().(*AccountOauthClientsPagedResponse).Results
v.appendData(r.Result().(*AccountOauthClientsPagedResponse))
}
case AccountPaymentsPagedResponse: case AccountPaymentsPagedResponse:
if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
return NewError(r)
} else if err == nil {
pages = r.Result().(*AccountPaymentsPagedResponse).Pages
results = r.Result().(*AccountPaymentsPagedResponse).Results
v.appendData(r.Result().(*AccountPaymentsPagedResponse))
}
case AccountUsersPagedResponse:
if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
return NewError(r)
} else if err == nil {
pages = r.Result().(*AccountUsersPagedResponse).Pages
results = r.Result().(*AccountUsersPagedResponse).Results
v.appendData(r.Result().(*AccountUsersPagedResponse))
}
case ProfileAppsPagedResponse: case ProfileAppsPagedResponse:
if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
return NewError(r)
} else if err == nil {
pages = r.Result().(*ProfileAppsPagedResponse).Pages
results = r.Result().(*ProfileAppsPagedResponse).Results
v.appendData(r.Result().(*ProfileAppsPagedResponse))
}
case ProfileTokensPagedResponse:
if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
return NewError(r)
} else if err == nil {
pages = r.Result().(*ProfileTokensPagedResponse).Pages
results = r.Result().(*ProfileTokensPagedResponse).Results
v.appendData(r.Result().(*ProfileTokensPagedResponse))
}
case ProfileWhitelistPagedResponse: case ProfileWhitelistPagedResponse:
if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
return NewError(r)
} else if err == nil {
pages = r.Result().(*ProfileWhitelistPagedResponse).Pages
results = r.Result().(*ProfileWhitelistPagedResponse).Results
v.appendData(r.Result().(*ProfileWhitelistPagedResponse))
}
case ManagedContactsPagedResponse: case ManagedContactsPagedResponse:
if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
return NewError(r)
} else if err == nil {
pages = r.Result().(*ManagedContactsPagedResponse).Pages
results = r.Result().(*ManagedContactsPagedResponse).Results
v.appendData(r.Result().(*ManagedContactsPagedResponse))
}
case ManagedCredentialsPagedResponse: case ManagedCredentialsPagedResponse:
if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
return NewError(r)
} else if err == nil {
pages = r.Result().(*ManagedCredentialsPagedResponse).Pages
results = r.Result().(*ManagedCredentialsPagedResponse).Results
v.appendData(r.Result().(*ManagedCredentialsPagedResponse))
}
case ManagedIssuesPagedResponse: case ManagedIssuesPagedResponse:
if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
return NewError(r)
} else if err == nil {
pages = r.Result().(*ManagedIssuesPagedResponse).Pages
results = r.Result().(*ManagedIssuesPagedResponse).Results
v.appendData(r.Result().(*ManagedIssuesPagedResponse))
}
case ManagedLinodeSettingsPagedResponse: case ManagedLinodeSettingsPagedResponse:
if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
return NewError(r)
} else if err == nil {
pages = r.Result().(*ManagedLinodeSettingsPagedResponse).Pages
results = r.Result().(*ManagedLinodeSettingsPagedResponse).Results
v.appendData(r.Result().(*ManagedLinodeSettingsPagedResponse))
}
case ManagedServicesPagedResponse: case ManagedServicesPagedResponse:
if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {
return NewError(r)
} else if err == nil {
pages = r.Result().(*ManagedServicesPagedResponse).Pages
results = r.Result().(*ManagedServicesPagedResponse).Results
v.appendData(r.Result().(*ManagedServicesPagedResponse))
}
**/ **/
default: default:
log.Fatalf("listHelper interface{} %+v used", i) log.Fatalf("listHelper interface{} %+v used", i)
@ -308,7 +247,7 @@ func (c *Client) listHelper(ctx context.Context, i interface{}, opts *ListOption
// When opts (or opts.Page) is nil, all pages will be fetched and // When opts (or opts.Page) is nil, all pages will be fetched and
// returned in a single (endpoint-specific)PagedResponse // returned in a single (endpoint-specific)PagedResponse
// opts.results and opts.pages will be updated from the API response // opts.results and opts.pages will be updated from the API response
func (c *Client) listHelperWithID(ctx context.Context, i interface{}, id int, opts *ListOptions) error { func (c *Client) listHelperWithID(ctx context.Context, i interface{}, idRaw interface{}, opts *ListOptions) error {
req := c.R(ctx) req := c.R(ctx)
if opts != nil && opts.Page > 0 { if opts != nil && opts.Page > 0 {
req.SetQueryParam("page", strconv.Itoa(opts.Page)) req.SetQueryParam("page", strconv.Itoa(opts.Page))
@ -321,6 +260,8 @@ func (c *Client) listHelperWithID(ctx context.Context, i interface{}, id int, op
r *resty.Response r *resty.Response
) )
id, _ := idRaw.(int)
if opts != nil && len(opts.Filter) > 0 { if opts != nil && len(opts.Filter) > 0 {
req.SetHeader("X-Filter", opts.Filter) req.SetHeader("X-Filter", opts.Filter)
} }
@ -366,6 +307,14 @@ func (c *Client) listHelperWithID(ctx context.Context, i interface{}, id int, op
results = r.Result().(*InstanceVolumesPagedResponse).Results results = r.Result().(*InstanceVolumesPagedResponse).Results
v.appendData(r.Result().(*InstanceVolumesPagedResponse)) v.appendData(r.Result().(*InstanceVolumesPagedResponse))
} }
case *TaggedObjectsPagedResponse:
idStr := idRaw.(string)
if r, err = coupleAPIErrors(req.SetResult(TaggedObjectsPagedResponse{}).Get(v.endpointWithID(c, idStr))); err == nil {
pages = r.Result().(*TaggedObjectsPagedResponse).Pages
results = r.Result().(*TaggedObjectsPagedResponse).Results
v.appendData(r.Result().(*TaggedObjectsPagedResponse))
}
/** /**
case TicketAttachmentsPagedResponse: case TicketAttachmentsPagedResponse:
if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil { if r, err = req.SetResult(v).Get(v.endpoint(c)); r.Error() != nil {


@ -1 +1,117 @@
package linodego package linodego
/*
- copy profile_test.go and do the same
- When updating Profile structs,
- use pointers where ever null'able would have a different meaning if the wrapper
supplied "" or 0 instead
- Add "NameOfResource" to client.go, resources.go, pagination.go
*/
import (
"context"
"encoding/json"
)
// LishAuthMethod constants start with AuthMethod and include Linode API Lish Authentication Methods
type LishAuthMethod string
// LishAuthMethod constants are the methods of authentication allowed when connecting via Lish
const (
AuthMethodPasswordKeys LishAuthMethod = "password_keys"
AuthMethodKeysOnly LishAuthMethod = "keys_only"
AuthMethodDisabled LishAuthMethod = "disabled"
)
// ProfileReferrals represent a User's status in the Referral Program
type ProfileReferrals struct {
Total int `json:"total"`
Completed int `json:"completed"`
Pending int `json:"pending"`
Credit float64 `json:"credit"`
Code string `json:"code"`
URL string `json:"url"`
}
// Profile represents a Profile object
type Profile struct {
UID int `json:"uid"`
Username string `json:"username"`
Email string `json:"email"`
Timezone string `json:"timezone"`
EmailNotifications bool `json:"email_notifications"`
IPWhitelistEnabled bool `json:"ip_whitelist_enabled"`
TwoFactorAuth bool `json:"two_factor_auth"`
Restricted bool `json:"restricted"`
LishAuthMethod LishAuthMethod `json:"lish_auth_method"`
Referrals ProfileReferrals `json:"referrals"`
AuthorizedKeys []string `json:"authorized_keys"`
}
// ProfileUpdateOptions fields are those accepted by UpdateProfile
type ProfileUpdateOptions struct {
Email string `json:"email,omitempty"`
Timezone string `json:"timezone,omitempty"`
EmailNotifications *bool `json:"email_notifications,omitempty"`
IPWhitelistEnabled *bool `json:"ip_whitelist_enabled,omitempty"`
LishAuthMethod LishAuthMethod `json:"lish_auth_method,omitempty"`
AuthorizedKeys *[]string `json:"authorized_keys,omitempty"`
TwoFactorAuth *bool `json:"two_factor_auth,omitempty"`
Restricted *bool `json:"restricted,omitempty"`
}
// GetUpdateOptions converts a Profile to ProfileUpdateOptions for use in UpdateProfile
func (i Profile) GetUpdateOptions() (o ProfileUpdateOptions) {
o.Email = i.Email
o.Timezone = i.Timezone
o.EmailNotifications = copyBool(&i.EmailNotifications)
o.IPWhitelistEnabled = copyBool(&i.IPWhitelistEnabled)
o.LishAuthMethod = i.LishAuthMethod
authorizedKeys := make([]string, len(i.AuthorizedKeys))
copy(authorizedKeys, i.AuthorizedKeys)
o.AuthorizedKeys = &authorizedKeys
o.TwoFactorAuth = copyBool(&i.TwoFactorAuth)
o.Restricted = copyBool(&i.Restricted)
return
}
// GetProfile gets the profile with the provided ID
func (c *Client) GetProfile(ctx context.Context) (*Profile, error) {
e, err := c.Profile.Endpoint()
if err != nil {
return nil, err
}
r, err := coupleAPIErrors(c.R(ctx).SetResult(&Profile{}).Get(e))
if err != nil {
return nil, err
}
return r.Result().(*Profile), nil
}
// UpdateProfile updates the Profile with the specified id
func (c *Client) UpdateProfile(ctx context.Context, updateOpts ProfileUpdateOptions) (*Profile, error) {
var body string
e, err := c.Profile.Endpoint()
if err != nil {
return nil, err
}
req := c.R(ctx).SetResult(&Profile{})
if bodyData, err := json.Marshal(updateOpts); err == nil {
body = string(bodyData)
} else {
return nil, NewError(err)
}
r, err := coupleAPIErrors(req.
SetBody(body).
Put(e))
if err != nil {
return nil, err
}
return r.Result().(*Profile), nil
}
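A usage sketch (illustration only, not part of this diff) of the GetProfile / GetUpdateOptions / UpdateProfile round trip for changing a single pointer field:

func disableEmailNotifications(ctx context.Context, c *linodego.Client) (*linodego.Profile, error) {
	profile, err := c.GetProfile(ctx)
	if err != nil {
		return nil, err
	}
	opts := profile.GetUpdateOptions()
	disabled := false
	opts.EmailNotifications = &disabled // pointer fields distinguish "false" from "unset"
	return c.UpdateProfile(ctx, opts)
}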

195
vendor/github.com/linode/linodego/profile_tokens.go generated vendored Normal file

@ -0,0 +1,195 @@
package linodego
import (
"context"
"encoding/json"
"fmt"
"time"
)
// Token represents a Token object
type Token struct {
// This token's unique ID, which can be used to revoke it.
ID int `json:"id"`
// The scopes this token was created with. These define what parts of the Account the token can be used to access. Many command-line tools, such as the Linode CLI, require tokens with access to *. Tokens with more restrictive scopes are generally more secure.
Scopes string `json:"scopes"`
// This token's label. This is for display purposes only, but can be used to more easily track what you're using each token for. (1-100 Characters)
Label string `json:"label"`
// The token used to access the API. When the token is created, the full token is returned here. Otherwise, only the first 16 characters are returned.
Token string `json:"token"`
// The date and time this token was created.
Created *time.Time `json:"-"`
CreatedStr string `json:"created"`
// When this token will expire. Personal Access Tokens cannot be renewed, so after this time the token will be completely unusable and a new token will need to be generated. Tokens may be created with "null" as their expiry and will never expire unless revoked.
Expiry *time.Time `json:"-"`
ExpiryStr string `json:"expiry"`
}
// TokenCreateOptions fields are those accepted by CreateToken
type TokenCreateOptions struct {
// The scopes this token was created with. These define what parts of the Account the token can be used to access. Many command-line tools, such as the Linode CLI, require tokens with access to *. Tokens with more restrictive scopes are generally more secure.
Scopes string `json:"scopes"`
// This token's label. This is for display purposes only, but can be used to more easily track what you're using each token for. (1-100 Characters)
Label string `json:"label"`
// When this token will expire. Personal Access Tokens cannot be renewed, so after this time the token will be completely unusable and a new token will need to be generated. Tokens may be created with "null" as their expiry and will never expire unless revoked.
Expiry *time.Time `json:"expiry"`
}
// TokenUpdateOptions fields are those accepted by UpdateToken
type TokenUpdateOptions struct {
// This token's label. This is for display purposes only, but can be used to more easily track what you're using each token for. (1-100 Characters)
Label string `json:"label"`
}
// GetCreateOptions converts a Token to TokenCreateOptions for use in CreateToken
func (i Token) GetCreateOptions() (o TokenCreateOptions) {
o.Label = i.Label
o.Expiry = copyTime(i.Expiry)
o.Scopes = i.Scopes
return
}
// GetUpdateOptions converts a Token to TokenUpdateOptions for use in UpdateToken
func (i Token) GetUpdateOptions() (o TokenUpdateOptions) {
o.Label = i.Label
return
}
// TokensPagedResponse represents a paginated Token API response
type TokensPagedResponse struct {
*PageOptions
Data []Token `json:"data"`
}
// endpoint gets the endpoint URL for Token
func (TokensPagedResponse) endpoint(c *Client) string {
endpoint, err := c.Tokens.Endpoint()
if err != nil {
panic(err)
}
return endpoint
}
// appendData appends Tokens when processing paginated Token responses
func (resp *TokensPagedResponse) appendData(r *TokensPagedResponse) {
resp.Data = append(resp.Data, r.Data...)
}
// ListTokens lists Tokens
func (c *Client) ListTokens(ctx context.Context, opts *ListOptions) ([]Token, error) {
response := TokensPagedResponse{}
err := c.listHelper(ctx, &response, opts)
for i := range response.Data {
response.Data[i].fixDates()
}
if err != nil {
return nil, err
}
return response.Data, nil
}
// fixDates converts JSON timestamps to Go time.Time values
func (i *Token) fixDates() *Token {
i.Created, _ = parseDates(i.CreatedStr)
i.Expiry, _ = parseDates(i.ExpiryStr)
return i
}
// GetToken gets the token with the provided ID
func (c *Client) GetToken(ctx context.Context, id int) (*Token, error) {
e, err := c.Tokens.Endpoint()
if err != nil {
return nil, err
}
e = fmt.Sprintf("%s/%d", e, id)
r, err := coupleAPIErrors(c.R(ctx).SetResult(&Token{}).Get(e))
if err != nil {
return nil, err
}
return r.Result().(*Token).fixDates(), nil
}
// CreateToken creates a Token
func (c *Client) CreateToken(ctx context.Context, createOpts TokenCreateOptions) (*Token, error) {
var body string
e, err := c.Tokens.Endpoint()
if err != nil {
return nil, err
}
req := c.R(ctx).SetResult(&Token{})
// Format the Time as a string to meet the ISO8601 requirement
createOptsFixed := struct {
Label string `json:"label"`
Scopes string `json:"scopes"`
Expiry *string `json:"expiry"`
}{}
createOptsFixed.Label = createOpts.Label
createOptsFixed.Scopes = createOpts.Scopes
if createOpts.Expiry != nil {
iso8601Expiry := createOpts.Expiry.UTC().Format("2006-01-02T15:04:05")
createOptsFixed.Expiry = &iso8601Expiry
}
if bodyData, err := json.Marshal(createOptsFixed); err == nil {
body = string(bodyData)
} else {
return nil, NewError(err)
}
r, err := coupleAPIErrors(req.
SetBody(body).
Post(e))
if err != nil {
return nil, err
}
return r.Result().(*Token).fixDates(), nil
}
// UpdateToken updates the Token with the specified id
func (c *Client) UpdateToken(ctx context.Context, id int, updateOpts TokenUpdateOptions) (*Token, error) {
var body string
e, err := c.Tokens.Endpoint()
if err != nil {
return nil, err
}
e = fmt.Sprintf("%s/%d", e, id)
req := c.R(ctx).SetResult(&Token{})
if bodyData, err := json.Marshal(updateOpts); err == nil {
body = string(bodyData)
} else {
return nil, NewError(err)
}
r, err := coupleAPIErrors(req.
SetBody(body).
Put(e))
if err != nil {
return nil, err
}
return r.Result().(*Token).fixDates(), nil
}
// DeleteToken deletes the Token with the specified id
func (c *Client) DeleteToken(ctx context.Context, id int) error {
e, err := c.Tokens.Endpoint()
if err != nil {
return err
}
e = fmt.Sprintf("%s/%d", e, id)
_, err = coupleAPIErrors(c.R(ctx).Delete(e))
return err
}
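A usage sketch (illustration only, not part of this diff) for CreateToken; the scope string is illustrative, and Expiry is reformatted to ISO 8601 by CreateToken as shown above (assumes a "time" import):

func createTemporaryToken(ctx context.Context, c *linodego.Client) (*linodego.Token, error) {
	expiry := time.Now().AddDate(0, 0, 30)
	return c.CreateToken(ctx, linodego.TokenCreateOptions{
		Label:  "lego-acme",
		Scopes: "domains:read_write", // illustrative scope string; "*" grants full access
		Expiry: &expiry,              // nil would create a token that never expires
	})
}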


@ -6,7 +6,7 @@ import (
"fmt" "fmt"
"text/template" "text/template"
"github.com/go-resty/resty" "gopkg.in/resty.v1"
) )
const ( const (
@ -33,14 +33,18 @@ const (
nodebalancersName = "nodebalancers" nodebalancersName = "nodebalancers"
nodebalancerconfigsName = "nodebalancerconfigs" nodebalancerconfigsName = "nodebalancerconfigs"
nodebalancernodesName = "nodebalancernodes" nodebalancernodesName = "nodebalancernodes"
notificationsName = "notifications"
sshkeysName = "sshkeys" sshkeysName = "sshkeys"
ticketsName = "tickets" ticketsName = "tickets"
tokensName = "tokens"
accountName = "account" accountName = "account"
eventsName = "events" eventsName = "events"
invoicesName = "invoices" invoicesName = "invoices"
invoiceItemsName = "invoiceitems" invoiceItemsName = "invoiceitems"
profileName = "profile" profileName = "profile"
managedName = "managed" managedName = "managed"
tagsName = "tags"
usersName = "users"
// notificationsName = "notifications" // notificationsName = "notifications"
stackscriptsEndpoint = "linode/stackscripts" stackscriptsEndpoint = "linode/stackscripts"
@ -51,9 +55,9 @@ const (
instanceSnapshotsEndpoint = "linode/instances/{{ .ID }}/backups" instanceSnapshotsEndpoint = "linode/instances/{{ .ID }}/backups"
instanceIPsEndpoint = "linode/instances/{{ .ID }}/ips" instanceIPsEndpoint = "linode/instances/{{ .ID }}/ips"
instanceVolumesEndpoint = "linode/instances/{{ .ID }}/volumes" instanceVolumesEndpoint = "linode/instances/{{ .ID }}/volumes"
ipaddressesEndpoint = "network/ips" ipaddressesEndpoint = "networking/ips"
ipv6poolsEndpoint = "network/ipv6/pools" ipv6poolsEndpoint = "networking/ipv6/pools"
ipv6rangesEndpoint = "network/ipv6/ranges" ipv6rangesEndpoint = "networking/ipv6/ranges"
regionsEndpoint = "regions" regionsEndpoint = "regions"
volumesEndpoint = "volumes" volumesEndpoint = "volumes"
kernelsEndpoint = "linode/kernels" kernelsEndpoint = "linode/kernels"
@ -72,13 +76,16 @@ const (
nodebalancernodesEndpoint = "nodebalancers/{{ .ID }}/configs/{{ .SecondID }}/nodes" nodebalancernodesEndpoint = "nodebalancers/{{ .ID }}/configs/{{ .SecondID }}/nodes"
sshkeysEndpoint = "profile/sshkeys" sshkeysEndpoint = "profile/sshkeys"
ticketsEndpoint = "support/tickets" ticketsEndpoint = "support/tickets"
tokensEndpoint = "profile/tokens"
accountEndpoint = "account" accountEndpoint = "account"
eventsEndpoint = "account/events" eventsEndpoint = "account/events"
invoicesEndpoint = "account/invoices" invoicesEndpoint = "account/invoices"
invoiceItemsEndpoint = "account/invoices/{{ .ID }}/items" invoiceItemsEndpoint = "account/invoices/{{ .ID }}/items"
profileEndpoint = "profile" profileEndpoint = "profile"
managedEndpoint = "managed" managedEndpoint = "managed"
// notificationsEndpoint = "account/notifications" tagsEndpoint = "tags"
usersEndpoint = "account/users"
notificationsEndpoint = "account/notifications"
) )
// Resource represents a linode API resource // Resource represents a linode API resource

219
vendor/github.com/linode/linodego/tags.go generated vendored Normal file

@ -0,0 +1,219 @@
package linodego
import (
"context"
"encoding/json"
"errors"
"fmt"
)
// Tag represents a Tag object
type Tag struct {
Label string `json:"label"`
}
// TaggedObject represents a Tagged Object object
type TaggedObject struct {
Type string `json:"type"`
RawData json.RawMessage `json:"data"`
Data interface{} `json:"-"`
}
// SortedObjects holds tagged objects sorted by type: Instances, Domains, Volumes, and NodeBalancers
type SortedObjects struct {
Instances []Instance
Domains []Domain
Volumes []Volume
NodeBalancers []NodeBalancer
/*
StackScripts []Stackscript
*/
}
// TaggedObjectList is a list of TaggedObjects, as returned by ListTaggedObjects
type TaggedObjectList []TaggedObject
// TagCreateOptions fields are those accepted by CreateTag
type TagCreateOptions struct {
Label string `json:"label"`
Linodes []int `json:"linodes,omitempty"`
Domains []int `json:"domains,omitempty"`
Volumes []int `json:"volumes,omitempty"`
NodeBalancers []int `json:"nodebalancers,omitempty"`
}
// GetCreateOptions converts a Tag to TagCreateOptions for use in CreateTag
func (i Tag) GetCreateOptions() (o TagCreateOptions) {
o.Label = i.Label
return
}
// TaggedObjectsPagedResponse represents a paginated Tag API response
type TaggedObjectsPagedResponse struct {
*PageOptions
Data []TaggedObject `json:"data"`
}
// TagsPagedResponse represents a paginated Tag API response
type TagsPagedResponse struct {
*PageOptions
Data []Tag `json:"data"`
}
// endpoint gets the endpoint URL for Tag
func (TagsPagedResponse) endpoint(c *Client) string {
endpoint, err := c.Tags.Endpoint()
if err != nil {
panic(err)
}
return endpoint
}
// endpoint gets the endpoint URL for Tag
func (TaggedObjectsPagedResponse) endpointWithID(c *Client, id string) string {
endpoint, err := c.Tags.Endpoint()
if err != nil {
panic(err)
}
endpoint = fmt.Sprintf("%s/%s", endpoint, id)
return endpoint
}
// appendData appends Tags when processing paginated Tag responses
func (resp *TagsPagedResponse) appendData(r *TagsPagedResponse) {
resp.Data = append(resp.Data, r.Data...)
}
// appendData appends TaggedObjects when processing paginated TaggedObjects responses
func (resp *TaggedObjectsPagedResponse) appendData(r *TaggedObjectsPagedResponse) {
resp.Data = append(resp.Data, r.Data...)
}
// ListTags lists Tags
func (c *Client) ListTags(ctx context.Context, opts *ListOptions) ([]Tag, error) {
response := TagsPagedResponse{}
err := c.listHelper(ctx, &response, opts)
if err != nil {
return nil, err
}
return response.Data, nil
}
// fixData stores an object of the type defined by Type in Data using RawData
func (i *TaggedObject) fixData() (*TaggedObject, error) {
switch i.Type {
case "linode":
obj := Instance{}
if err := json.Unmarshal(i.RawData, &obj); err != nil {
return nil, err
}
i.Data = obj
case "nodebalancer":
obj := NodeBalancer{}
if err := json.Unmarshal(i.RawData, &obj); err != nil {
return nil, err
}
i.Data = obj
case "domain":
obj := Domain{}
if err := json.Unmarshal(i.RawData, &obj); err != nil {
return nil, err
}
i.Data = obj
case "volume":
obj := Volume{}
if err := json.Unmarshal(i.RawData, &obj); err != nil {
return nil, err
}
i.Data = obj
}
return i, nil
}
// ListTaggedObjects lists Tagged Objects
func (c *Client) ListTaggedObjects(ctx context.Context, label string, opts *ListOptions) (TaggedObjectList, error) {
response := TaggedObjectsPagedResponse{}
err := c.listHelperWithID(ctx, &response, label, opts)
if err != nil {
return nil, err
}
for i := range response.Data {
if _, err := response.Data[i].fixData(); err != nil {
return nil, err
}
}
return response.Data, nil
}
// SortedObjects converts a list of TaggedObjects into a Sorted Objects struct, for easier access
func (t TaggedObjectList) SortedObjects() (SortedObjects, error) {
so := SortedObjects{}
for _, o := range t {
switch o.Type {
case "linode":
if instance, ok := o.Data.(Instance); ok {
so.Instances = append(so.Instances, instance)
} else {
return so, errors.New("Expected an Instance when Type was \"linode\"")
}
case "domain":
if domain, ok := o.Data.(Domain); ok {
so.Domains = append(so.Domains, domain)
} else {
return so, errors.New("Expected a Domain when Type was \"domain\"")
}
case "volume":
if volume, ok := o.Data.(Volume); ok {
so.Volumes = append(so.Volumes, volume)
} else {
return so, errors.New("Expected an Volume when Type was \"volume\"")
}
case "nodebalancer":
if nodebalancer, ok := o.Data.(NodeBalancer); ok {
so.NodeBalancers = append(so.NodeBalancers, nodebalancer)
} else {
return so, errors.New("Expected an NodeBalancer when Type was \"nodebalancer\"")
}
}
}
return so, nil
}
// CreateTag creates a Tag
func (c *Client) CreateTag(ctx context.Context, createOpts TagCreateOptions) (*Tag, error) {
var body string
e, err := c.Tags.Endpoint()
if err != nil {
return nil, err
}
req := c.R(ctx).SetResult(&Tag{})
if bodyData, err := json.Marshal(createOpts); err == nil {
body = string(bodyData)
} else {
return nil, NewError(err)
}
r, err := coupleAPIErrors(req.
SetBody(body).
Post(e))
if err != nil {
return nil, err
}
return r.Result().(*Tag), nil
}
// DeleteTag deletes the Tag with the specified id
func (c *Client) DeleteTag(ctx context.Context, label string) error {
e, err := c.Tags.Endpoint()
if err != nil {
return err
}
e = fmt.Sprintf("%s/%s", e, label)
_, err = coupleAPIErrors(c.R(ctx).Delete(e))
return err
}
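A usage sketch (illustration only, not part of this diff) combining ListTaggedObjects and SortedObjects to pull out the Instances carrying a given tag:

func productionInstances(ctx context.Context, c *linodego.Client) ([]linodego.Instance, error) {
	objects, err := c.ListTaggedObjects(ctx, "production", nil)
	if err != nil {
		return nil, err
	}
	sorted, err := objects.SortedObjects()
	if err != nil {
		return nil, err
	}
	return sorted.Instances, nil
}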


@ -36,14 +36,14 @@ type TemplateUpdateOptions struct {
// GetCreateOptions converts a Template to TemplateCreateOptions for use in CreateTemplate // GetCreateOptions converts a Template to TemplateCreateOptions for use in CreateTemplate
func (i Template) GetCreateOptions() (o TemplateCreateOptions) { func (i Template) GetCreateOptions() (o TemplateCreateOptions) {
// o.Label = i.Label // o.Label = i.Label
// o.Description = copyString(o.Description) // o.Description = copyString(i.Description)
return return
} }
// GetUpdateOptions converts a Template to TemplateUpdateOptions for use in UpdateTemplate // GetUpdateOptions converts a Template to TemplateUpdateOptions for use in UpdateTemplate
func (i Template) GetUpdateOptions() (o TemplateUpdateOptions) { func (i Template) GetUpdateOptions() (o TemplateUpdateOptions) {
// o.Label = i.Label // o.Label = i.Label
// o.Description = copyString(o.Description) // o.Description = copyString(i.Description)
return return
} }


@ -9,7 +9,7 @@ import (
type LinodeType struct { type LinodeType struct {
ID string `json:"id"` ID string `json:"id"`
Disk int `json:"disk"` Disk int `json:"disk"`
Class LinodeTypeClass `json:"class"` // enum: nanode, standard, highmem Class LinodeTypeClass `json:"class"` // enum: nanode, standard, highmem, dedicated
Price *LinodePrice `json:"price"` Price *LinodePrice `json:"price"`
Label string `json:"label"` Label string `json:"label"`
Addons *LinodeAddons `json:"addons"` Addons *LinodeAddons `json:"addons"`
@ -40,9 +40,10 @@ type LinodeTypeClass string
// LinodeTypeClass constants are the Instance Type Classes that an Instance Type can be assigned // LinodeTypeClass constants are the Instance Type Classes that an Instance Type can be assigned
const ( const (
ClassNanode LinodeTypeClass = "nanode" ClassNanode LinodeTypeClass = "nanode"
ClassStandard LinodeTypeClass = "standard" ClassStandard LinodeTypeClass = "standard"
ClassHighmem LinodeTypeClass = "highmem" ClassHighmem LinodeTypeClass = "highmem"
ClassDedicated LinodeTypeClass = "dedicated"
) )
// LinodeTypesPagedResponse represents a linode types API response for listing // LinodeTypesPagedResponse represents a linode types API response for listing


@ -36,6 +36,7 @@ type Volume struct {
Size int `json:"size"` Size int `json:"size"`
LinodeID *int `json:"linode_id"` LinodeID *int `json:"linode_id"`
FilesystemPath string `json:"filesystem_path"` FilesystemPath string `json:"filesystem_path"`
Tags []string `json:"tags"`
Created time.Time `json:"-"` Created time.Time `json:"-"`
Updated time.Time `json:"-"` Updated time.Time `json:"-"`
} }
@ -48,6 +49,14 @@ type VolumeCreateOptions struct {
ConfigID int `json:"config_id,omitempty"` ConfigID int `json:"config_id,omitempty"`
// The Volume's size, in GiB. Minimum size is 10GiB, maximum size is 10240GiB. A "0" value will result in the default size. // The Volume's size, in GiB. Minimum size is 10GiB, maximum size is 10240GiB. A "0" value will result in the default size.
Size int `json:"size,omitempty"` Size int `json:"size,omitempty"`
// An array of tags applied to this object. Tags are for organizational purposes only.
Tags []string `json:"tags"`
}
// VolumeUpdateOptions fields are those accepted by UpdateVolume
type VolumeUpdateOptions struct {
Label string `json:"label,omitempty"`
Tags *[]string `json:"tags,omitempty"`
} }
// VolumeAttachOptions fields are those accepted by AttachVolume // VolumeAttachOptions fields are those accepted by AttachVolume
@ -62,6 +71,25 @@ type VolumesPagedResponse struct {
Data []Volume `json:"data"` Data []Volume `json:"data"`
} }
// GetUpdateOptions converts a Volume to VolumeUpdateOptions for use in UpdateVolume
func (v Volume) GetUpdateOptions() (updateOpts VolumeUpdateOptions) {
updateOpts.Label = v.Label
updateOpts.Tags = &v.Tags
return
}
// GetCreateOptions converts a Volume to VolumeCreateOptions for use in CreateVolume
func (v Volume) GetCreateOptions() (createOpts VolumeCreateOptions) {
createOpts.Label = v.Label
createOpts.Tags = v.Tags
createOpts.Region = v.Region
createOpts.Size = v.Size
if v.LinodeID != nil && *v.LinodeID > 0 {
createOpts.LinodeID = *v.LinodeID
}
return
}
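A usage sketch (illustration only, not part of this diff) of the new Tags field on VolumeCreateOptions, assuming the CreateVolume method referenced in the hunk below:

func createTaggedVolume(ctx context.Context, c *linodego.Client, region string) (*linodego.Volume, error) {
	return c.CreateVolume(ctx, linodego.VolumeCreateOptions{
		Label:  "lego-data",
		Region: region,
		Size:   20, // GiB; 0 falls back to the default size
		Tags:   []string{"acme", "dns"},
	})
}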
// endpoint gets the endpoint URL for Volume // endpoint gets the endpoint URL for Volume
func (VolumesPagedResponse) endpoint(c *Client) string { func (VolumesPagedResponse) endpoint(c *Client) string {
endpoint, err := c.Volumes.Endpoint() endpoint, err := c.Volumes.Endpoint()
@ -168,26 +196,37 @@ func (c *Client) CreateVolume(ctx context.Context, createOpts VolumeCreateOption
} }
// RenameVolume renames the label of a Linode volume // RenameVolume renames the label of a Linode volume
// There is no UpdateVolume because the label is the only alterable field. // DEPRECATED: use UpdateVolume
func (c *Client) RenameVolume(ctx context.Context, id int, label string) (*Volume, error) { func (c *Client) RenameVolume(ctx context.Context, id int, label string) (*Volume, error) {
body, _ := json.Marshal(map[string]string{"label": label}) updateOpts := VolumeUpdateOptions{Label: label}
return c.UpdateVolume(ctx, id, updateOpts)
}
// UpdateVolume updates the Volume with the specified id
func (c *Client) UpdateVolume(ctx context.Context, id int, volume VolumeUpdateOptions) (*Volume, error) {
var body string
e, err := c.Volumes.Endpoint() e, err := c.Volumes.Endpoint()
if err != nil { if err != nil {
return nil, NewError(err) return nil, err
} }
e = fmt.Sprintf("%s/%d", e, id) e = fmt.Sprintf("%s/%d", e, id)
resp, err := coupleAPIErrors(c.R(ctx). req := c.R(ctx).SetResult(&Volume{})
SetResult(&Volume{}).
if bodyData, err := json.Marshal(volume); err == nil {
body = string(bodyData)
} else {
return nil, NewError(err)
}
r, err := coupleAPIErrors(req.
SetBody(body). SetBody(body).
Put(e)) Put(e))
if err != nil { if err != nil {
return nil, err return nil, err
} }
return r.Result().(*Volume).fixDates(), nil
return resp.Result().(*Volume).fixDates(), nil
} }
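A usage sketch (illustration only, not part of this diff): the UpdateVolume call that replaces a deprecated RenameVolume call and can change the label and tags in one request:

func renameAndTagVolume(ctx context.Context, c *linodego.Client, id int, label string) (*linodego.Volume, error) {
	tags := []string{"backups"}
	return c.UpdateVolume(ctx, id, linodego.VolumeUpdateOptions{
		Label: label,
		Tags:  &tags, // nil leaves the existing tags in place
	})
}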
// CloneVolume clones a Linode volume // CloneVolume clones a Linode volume


@ -16,7 +16,7 @@ func (client Client) WaitForInstanceStatus(ctx context.Context, instanceID int,
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel() defer cancel()
ticker := time.NewTicker(time.Second) ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
defer ticker.Stop() defer ticker.Stop()
for { for {
select { select {
@ -36,13 +36,46 @@ func (client Client) WaitForInstanceStatus(ctx context.Context, instanceID int,
} }
} }
// WaitForInstanceDiskStatus waits for the Linode instance disk to reach the desired state
// before returning. It will timeout with an error after timeoutSeconds.
func (client Client) WaitForInstanceDiskStatus(ctx context.Context, instanceID int, diskID int, status DiskStatus, timeoutSeconds int) (*InstanceDisk, error) {
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel()
ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-ticker.C:
// GetInstanceDisk will 404 on newly created disks. use List instead.
// disk, err := client.GetInstanceDisk(ctx, instanceID, diskID)
disks, err := client.ListInstanceDisks(ctx, instanceID, nil)
if err != nil {
return nil, err
}
for _, disk := range disks {
if disk.ID == diskID {
complete := (disk.Status == status)
if complete {
return &disk, nil
}
break
}
}
case <-ctx.Done():
return nil, fmt.Errorf("Error waiting for Instance %d Disk %d status %s: %s", instanceID, diskID, status, ctx.Err())
}
}
}
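A usage sketch (illustration only, not part of this diff) of the new disk poller; linodego.DiskReady is assumed here to be the "ready" DiskStatus constant:

func waitForDiskReady(ctx context.Context, c *linodego.Client, instanceID, diskID int) error {
	// Poll (at the client's configured interval) until the disk reports ready,
	// giving up after 180 seconds.
	_, err := c.WaitForInstanceDiskStatus(ctx, instanceID, diskID, linodego.DiskReady, 180)
	return err
}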
// WaitForVolumeStatus waits for the Volume to reach the desired state // WaitForVolumeStatus waits for the Volume to reach the desired state
// before returning. It will timeout with an error after timeoutSeconds. // before returning. It will timeout with an error after timeoutSeconds.
func (client Client) WaitForVolumeStatus(ctx context.Context, volumeID int, status VolumeStatus, timeoutSeconds int) (*Volume, error) { func (client Client) WaitForVolumeStatus(ctx context.Context, volumeID int, status VolumeStatus, timeoutSeconds int) (*Volume, error) {
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel() defer cancel()
ticker := time.NewTicker(time.Second) ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
defer ticker.Stop() defer ticker.Stop()
for { for {
select { select {
@ -68,7 +101,7 @@ func (client Client) WaitForSnapshotStatus(ctx context.Context, instanceID int,
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel() defer cancel()
ticker := time.NewTicker(time.Second) ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
defer ticker.Stop() defer ticker.Stop()
for { for {
select { select {
@ -96,7 +129,7 @@ func (client Client) WaitForVolumeLinodeID(ctx context.Context, volumeID int, li
ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second) ctx, cancel := context.WithTimeout(ctx, time.Duration(timeoutSeconds)*time.Second)
defer cancel() defer cancel()
ticker := time.NewTicker(time.Second) ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
defer ticker.Stop() defer ticker.Stop()
for { for {
select { select {
@ -163,7 +196,7 @@ func (client Client) WaitForEventFinished(ctx context.Context, id interface{}, e
log.Printf("[INFO] Waiting %d seconds for %s events since %v for %s %v", int(duration.Seconds()), action, minStart, titledEntityType, id) log.Printf("[INFO] Waiting %d seconds for %s events since %v for %s %v", int(duration.Seconds()), action, minStart, titledEntityType, id)
} }
ticker := time.NewTicker(time.Second) ticker := time.NewTicker(client.millisecondsPerPoll * time.Millisecond)
defer ticker.Stop() defer ticker.Stop()
for { for {
select { select {
@ -180,7 +213,7 @@ func (client Client) WaitForEventFinished(ctx context.Context, id interface{}, e
// log.Println("action mismatch", event.Action, action) // log.Println("action mismatch", event.Action, action)
continue continue
} }
if event.Entity.Type != entityType { if event.Entity == nil || event.Entity.Type != entityType {
// log.Println("type mismatch", event.Entity.Type, entityType) // log.Println("type mismatch", event.Entity.Type, entityType)
continue continue
} }


@ -10,7 +10,7 @@ type MsgAcceptFunc func(dh Header) MsgAcceptAction
// * opcode isn't OpcodeQuery or OpcodeNotify // * opcode isn't OpcodeQuery or OpcodeNotify
// * Zero bit isn't zero // * Zero bit isn't zero
// * has more than 1 question in the question section // * has more than 1 question in the question section
// * has more than 0 RRs in the Answer section // * has more than 1 RR in the Answer section
// * has more than 0 RRs in the Authority section // * has more than 0 RRs in the Authority section
// * has more than 2 RRs in the Additional section // * has more than 2 RRs in the Additional section
var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc
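A usage sketch (illustration only, not part of this diff) of overriding the acceptance policy; the MsgAcceptFunc field on dns.Server is assumed here, and the opcode extraction mirrors the check inside defaultMsgAcceptFunc:

func newNotifyIgnoringServer() *dns.Server {
	return &dns.Server{
		Addr: ":8053",
		Net:  "udp",
		MsgAcceptFunc: func(dh dns.Header) dns.MsgAcceptAction {
			// Silently drop NOTIFY requests, delegate everything else to the default policy.
			if int(dh.Bits>>11)&0xF == dns.OpcodeNotify {
				return dns.MsgIgnore
			}
			return dns.DefaultMsgAcceptFunc(dh)
		},
	}
}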
@ -24,7 +24,7 @@ const (
MsgIgnore // Ignore the error and send nothing back. MsgIgnore // Ignore the error and send nothing back.
) )
var defaultMsgAcceptFunc = func(dh Header) MsgAcceptAction { func defaultMsgAcceptFunc(dh Header) MsgAcceptAction {
if isResponse := dh.Bits&_QR != 0; isResponse { if isResponse := dh.Bits&_QR != 0; isResponse {
return MsgIgnore return MsgIgnore
} }
@ -41,10 +41,12 @@ var defaultMsgAcceptFunc = func(dh Header) MsgAcceptAction {
if dh.Qdcount != 1 { if dh.Qdcount != 1 {
return MsgReject return MsgReject
} }
if dh.Ancount != 0 { // NOTIFY requests can have a SOA in the ANSWER section. See RFC 1996 Section 3.7 and 3.11.
if dh.Ancount > 1 {
return MsgReject return MsgReject
} }
if dh.Nscount != 0 { // IXFR request could have one SOA RR in the NS section. See RFC 1995, section 3.
if dh.Nscount > 1 {
return MsgReject return MsgReject
} }
if dh.Arcount > 2 { if dh.Arcount > 2 {

151
vendor/github.com/miekg/dns/client.go generated vendored

@ -3,10 +3,10 @@ package dns
// A client implementation. // A client implementation.
import ( import (
"bytes"
"context" "context"
"crypto/tls" "crypto/tls"
"encoding/binary" "encoding/binary"
"fmt"
"io" "io"
"net" "net"
"strings" "strings"
@ -129,20 +129,15 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er
return c.exchange(m, address) return c.exchange(m, address)
} }
t := "nop" q := m.Question[0]
if t1, ok := TypeToString[m.Question[0].Qtype]; ok { key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass)
t = t1 r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) {
}
cl := "nop"
if cl1, ok := ClassToString[m.Question[0].Qclass]; ok {
cl = cl1
}
r, rtt, err, shared := c.group.Do(m.Question[0].Name+t+cl, func() (*Msg, time.Duration, error) {
return c.exchange(m, address) return c.exchange(m, address)
}) })
if r != nil && shared { if r != nil && shared {
r = r.Copy() r = r.Copy()
} }
return r, rtt, err return r, rtt, err
} }
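A usage sketch (illustration only, not part of this diff): a plain Client.Exchange call; with SingleInflight enabled, concurrent queries for the same (name, type, class) now share one upstream exchange under the key built above:

func lookupA(name, server string) (*dns.Msg, error) {
	c := &dns.Client{SingleInflight: true} // duplicate in-flight queries share one exchange
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn(name), dns.TypeA)
	r, _, err := c.Exchange(m, server)
	return r, err
}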
@ -220,18 +215,15 @@ func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
n int n int
err error err error
) )
switch co.Conn.(type) {
switch t := co.Conn.(type) {
case *net.TCPConn, *tls.Conn: case *net.TCPConn, *tls.Conn:
r := t.(io.Reader) var length uint16
if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
// First two bytes specify the length of the entire message.
l, err := tcpMsgLen(r)
if err != nil {
return nil, err return nil, err
} }
p = make([]byte, l)
n, err = tcpRead(r, p) p = make([]byte, length)
n, err = io.ReadFull(co.Conn, p)
default: default:
if co.UDPSize > MinMsgSize { if co.UDPSize > MinMsgSize {
p = make([]byte, co.UDPSize) p = make([]byte, co.UDPSize)
@ -258,78 +250,27 @@ func (co *Conn) ReadMsgHeader(hdr *Header) ([]byte, error) {
return p, err return p, err
} }
// tcpMsgLen is a helper func to read first two bytes of stream as uint16 packet length.
func tcpMsgLen(t io.Reader) (int, error) {
p := []byte{0, 0}
n, err := t.Read(p)
if err != nil {
return 0, err
}
// As seen with my local router/switch, returns 1 byte on the above read,
// resulting a a ShortRead. Just write it out (instead of loop) and read the
// other byte.
if n == 1 {
n1, err := t.Read(p[1:])
if err != nil {
return 0, err
}
n += n1
}
if n != 2 {
return 0, ErrShortRead
}
l := binary.BigEndian.Uint16(p)
if l == 0 {
return 0, ErrShortRead
}
return int(l), nil
}
// tcpRead calls TCPConn.Read enough times to fill allocated buffer.
func tcpRead(t io.Reader, p []byte) (int, error) {
n, err := t.Read(p)
if err != nil {
return n, err
}
for n < len(p) {
j, err := t.Read(p[n:])
if err != nil {
return n, err
}
n += j
}
return n, err
}
// Read implements the net.Conn read method. // Read implements the net.Conn read method.
func (co *Conn) Read(p []byte) (n int, err error) { func (co *Conn) Read(p []byte) (n int, err error) {
if co.Conn == nil { if co.Conn == nil {
return 0, ErrConnEmpty return 0, ErrConnEmpty
} }
if len(p) < 2 {
return 0, io.ErrShortBuffer
}
switch t := co.Conn.(type) {
case *net.TCPConn, *tls.Conn:
r := t.(io.Reader)
l, err := tcpMsgLen(r) switch co.Conn.(type) {
if err != nil { case *net.TCPConn, *tls.Conn:
var length uint16
if err := binary.Read(co.Conn, binary.BigEndian, &length); err != nil {
return 0, err return 0, err
} }
if l > len(p) { if int(length) > len(p) {
return int(l), io.ErrShortBuffer return 0, io.ErrShortBuffer
} }
return tcpRead(r, p[:l])
return io.ReadFull(co.Conn, p[:length])
} }
// UDP connection // UDP connection
n, err = co.Conn.Read(p) return co.Conn.Read(p)
if err != nil {
return n, err
}
return n, err
} }
// WriteMsg sends a message through the connection co. // WriteMsg sends a message through the connection co.
@ -351,33 +292,26 @@ func (co *Conn) WriteMsg(m *Msg) (err error) {
if err != nil { if err != nil {
return err return err
} }
if _, err = co.Write(out); err != nil { _, err = co.Write(out)
return err return err
}
return nil
} }
// Write implements the net.Conn Write method. // Write implements the net.Conn Write method.
func (co *Conn) Write(p []byte) (n int, err error) { func (co *Conn) Write(p []byte) (n int, err error) {
switch t := co.Conn.(type) { switch co.Conn.(type) {
case *net.TCPConn, *tls.Conn: case *net.TCPConn, *tls.Conn:
w := t.(io.Writer) if len(p) > MaxMsgSize {
lp := len(p)
if lp < 2 {
return 0, io.ErrShortBuffer
}
if lp > MaxMsgSize {
return 0, &Error{err: "message too large"} return 0, &Error{err: "message too large"}
} }
l := make([]byte, 2, lp+2)
binary.BigEndian.PutUint16(l, uint16(lp)) l := make([]byte, 2)
p = append(l, p...) binary.BigEndian.PutUint16(l, uint16(len(p)))
n, err := io.Copy(w, bytes.NewReader(p))
n, err := (&net.Buffers{l, p}).WriteTo(co.Conn)
return int(n), err return int(n), err
} }
n, err = co.Conn.Write(p)
return n, err return co.Conn.Write(p)
} }
// Return the appropriate timeout for a specific request // Return the appropriate timeout for a specific request
@ -420,7 +354,7 @@ func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error)
// ExchangeConn performs a synchronous query. It sends the message m via the connection // ExchangeConn performs a synchronous query. It sends the message m via the connection
// c and waits for a reply. The connection c is not closed by ExchangeConn. // c and waits for a reply. The connection c is not closed by ExchangeConn.
// This function is going away, but can easily be mimicked: // Deprecated: This function is going away, but can easily be mimicked:
// //
// co := &dns.Conn{Conn: c} // c is your net.Conn // co := &dns.Conn{Conn: c} // c is your net.Conn
// co.WriteMsg(m) // co.WriteMsg(m)
@ -444,11 +378,7 @@ func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) {
// DialTimeout acts like Dial but takes a timeout. // DialTimeout acts like Dial but takes a timeout.
func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) { func DialTimeout(network, address string, timeout time.Duration) (conn *Conn, err error) {
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}} client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}}
conn, err = client.Dial(address) return client.Dial(address)
if err != nil {
return nil, err
}
return conn, nil
} }
// DialWithTLS connects to the address on the named network with TLS. // DialWithTLS connects to the address on the named network with TLS.
@ -457,12 +387,7 @@ func DialWithTLS(network, address string, tlsConfig *tls.Config) (conn *Conn, er
network += "-tls" network += "-tls"
} }
client := Client{Net: network, TLSConfig: tlsConfig} client := Client{Net: network, TLSConfig: tlsConfig}
conn, err = client.Dial(address) return client.Dial(address)
if err != nil {
return nil, err
}
return conn, nil
} }
// DialTimeoutWithTLS acts like DialWithTLS but takes a timeout. // DialTimeoutWithTLS acts like DialWithTLS but takes a timeout.
@ -471,11 +396,7 @@ func DialTimeoutWithTLS(network, address string, tlsConfig *tls.Config, timeout
network += "-tls" network += "-tls"
} }
client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig} client := Client{Net: network, Dialer: &net.Dialer{Timeout: timeout}, TLSConfig: tlsConfig}
conn, err = client.Dial(address) return client.Dial(address)
if err != nil {
return nil, err
}
return conn, nil
} }
// ExchangeContext acts like Exchange, but honors the deadline on the provided // ExchangeContext acts like Exchange, but honors the deadline on the provided


@ -68,14 +68,10 @@ func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) {
} }
case "search": // set search path to given servers case "search": // set search path to given servers
c.Search = make([]string, len(f)-1) c.Search = append([]string(nil), f[1:]...)
for i := 0; i < len(c.Search); i++ {
c.Search[i] = f[i+1]
}
case "options": // magic options case "options": // magic options
for i := 1; i < len(f); i++ { for _, s := range f[1:] {
s := f[i]
switch { switch {
case len(s) >= 6 && s[:6] == "ndots:": case len(s) >= 6 && s[:6] == "ndots:":
n, _ := strconv.Atoi(s[6:]) n, _ := strconv.Atoi(s[6:])


@ -1,198 +0,0 @@
//+build ignore
// compression_generate.go is meant to run with go generate. It will use
// go/{importer,types} to track down all the RR struct types. Then for each type
// it will look to see if there are (compressible) names, if so it will add that
// type to compressionLenHelperType and comressionLenSearchType which "fake" the
// compression so that Len() is fast.
package main
import (
"bytes"
"fmt"
"go/format"
"go/importer"
"go/types"
"log"
"os"
)
var packageHdr = `
// Code generated by "go run compress_generate.go"; DO NOT EDIT.
package dns
`
// getTypeStruct will take a type and the package scope, and return the
// (innermost) struct if the type is considered a RR type (currently defined as
// those structs beginning with a RR_Header, could be redefined as implementing
// the RR interface). The bool return value indicates if embedded structs were
// resolved.
func getTypeStruct(t types.Type, scope *types.Scope) (*types.Struct, bool) {
st, ok := t.Underlying().(*types.Struct)
if !ok {
return nil, false
}
if st.Field(0).Type() == scope.Lookup("RR_Header").Type() {
return st, false
}
if st.Field(0).Anonymous() {
st, _ := getTypeStruct(st.Field(0).Type(), scope)
return st, true
}
return nil, false
}
func main() {
// Import and type-check the package
pkg, err := importer.Default().Import("github.com/miekg/dns")
fatalIfErr(err)
scope := pkg.Scope()
var domainTypes []string // Types that have a domain name in them (either compressible or not).
var cdomainTypes []string // Types that have a compressible domain name in them (subset of domainType)
Names:
for _, name := range scope.Names() {
o := scope.Lookup(name)
if o == nil || !o.Exported() {
continue
}
st, _ := getTypeStruct(o.Type(), scope)
if st == nil {
continue
}
if name == "PrivateRR" {
continue
}
if scope.Lookup("Type"+o.Name()) == nil && o.Name() != "RFC3597" {
log.Fatalf("Constant Type%s does not exist.", o.Name())
}
for i := 1; i < st.NumFields(); i++ {
if _, ok := st.Field(i).Type().(*types.Slice); ok {
if st.Tag(i) == `dns:"domain-name"` {
domainTypes = append(domainTypes, o.Name())
continue Names
}
if st.Tag(i) == `dns:"cdomain-name"` {
cdomainTypes = append(cdomainTypes, o.Name())
domainTypes = append(domainTypes, o.Name())
continue Names
}
continue
}
switch {
case st.Tag(i) == `dns:"domain-name"`:
domainTypes = append(domainTypes, o.Name())
continue Names
case st.Tag(i) == `dns:"cdomain-name"`:
cdomainTypes = append(cdomainTypes, o.Name())
domainTypes = append(domainTypes, o.Name())
continue Names
}
}
}
b := &bytes.Buffer{}
b.WriteString(packageHdr)
// compressionLenHelperType - all types that have domain-name/cdomain-name can be used for compressing names
fmt.Fprint(b, "func compressionLenHelperType(c map[string]struct{}, r RR, initLen int) int {\n")
fmt.Fprint(b, "currentLen := initLen\n")
fmt.Fprint(b, "switch x := r.(type) {\n")
for _, name := range domainTypes {
o := scope.Lookup(name)
st, _ := getTypeStruct(o.Type(), scope)
fmt.Fprintf(b, "case *%s:\n", name)
for i := 1; i < st.NumFields(); i++ {
out := func(s string) {
fmt.Fprintf(b, "currentLen -= len(x.%s) + 1\n", st.Field(i).Name())
fmt.Fprintf(b, "currentLen += compressionLenHelper(c, x.%s, currentLen)\n", st.Field(i).Name())
}
if _, ok := st.Field(i).Type().(*types.Slice); ok {
switch st.Tag(i) {
case `dns:"domain-name"`:
fallthrough
case `dns:"cdomain-name"`:
// For HIP we need to slice over the elements in this slice.
fmt.Fprintf(b, `for i := range x.%s {
currentLen -= len(x.%s[i]) + 1
}
`, st.Field(i).Name(), st.Field(i).Name())
fmt.Fprintf(b, `for i := range x.%s {
currentLen += compressionLenHelper(c, x.%s[i], currentLen)
}
`, st.Field(i).Name(), st.Field(i).Name())
}
continue
}
switch {
case st.Tag(i) == `dns:"cdomain-name"`:
fallthrough
case st.Tag(i) == `dns:"domain-name"`:
out(st.Field(i).Name())
}
}
}
fmt.Fprintln(b, "}\nreturn currentLen - initLen\n}\n\n")
// compressionLenSearchType - search cdomain-tags types for compressible names.
fmt.Fprint(b, "func compressionLenSearchType(c map[string]struct{}, r RR) (int, bool, int) {\n")
fmt.Fprint(b, "switch x := r.(type) {\n")
for _, name := range cdomainTypes {
o := scope.Lookup(name)
st, _ := getTypeStruct(o.Type(), scope)
fmt.Fprintf(b, "case *%s:\n", name)
j := 1
for i := 1; i < st.NumFields(); i++ {
out := func(s string, j int) {
fmt.Fprintf(b, "k%d, ok%d, sz%d := compressionLenSearch(c, x.%s)\n", j, j, j, st.Field(i).Name())
}
// There are no slice types with names that can be compressed.
switch {
case st.Tag(i) == `dns:"cdomain-name"`:
out(st.Field(i).Name(), j)
j++
}
}
k := "k1"
ok := "ok1"
sz := "sz1"
for i := 2; i < j; i++ {
k += fmt.Sprintf(" + k%d", i)
ok += fmt.Sprintf(" && ok%d", i)
sz += fmt.Sprintf(" + sz%d", i)
}
fmt.Fprintf(b, "return %s, %s, %s\n", k, ok, sz)
}
fmt.Fprintln(b, "}\nreturn 0, false, 0\n}\n\n")
// gofmt
res, err := format.Source(b.Bytes())
if err != nil {
b.WriteTo(os.Stderr)
log.Fatal(err)
}
f, err := os.Create("zcompress.go")
fatalIfErr(err)
defer f.Close()
f.Write(res)
}
func fatalIfErr(err error) {
if err != nil {
log.Fatal(err)
}
}


@ -4,6 +4,7 @@ import (
"errors" "errors"
"net" "net"
"strconv" "strconv"
"strings"
) )
const hexDigit = "0123456789abcdef" const hexDigit = "0123456789abcdef"
@ -145,10 +146,9 @@ func (dns *Msg) IsTsig() *TSIG {
// record in the additional section will do. It returns the OPT record // record in the additional section will do. It returns the OPT record
// found or nil. // found or nil.
func (dns *Msg) IsEdns0() *OPT { func (dns *Msg) IsEdns0() *OPT {
// EDNS0 is at the end of the additional section, start there. // RFC 6891, Section 6.1.1 allows the OPT record to appear
// We might want to change this to *only* look at the last two // anywhere in the additional record section, but it's usually at
// records. So we see TSIG and/or OPT - this a slightly bigger // the end so start there.
// change though.
for i := len(dns.Extra) - 1; i >= 0; i-- { for i := len(dns.Extra) - 1; i >= 0; i-- {
if dns.Extra[i].Header().Rrtype == TypeOPT { if dns.Extra[i].Header().Rrtype == TypeOPT {
return dns.Extra[i].(*OPT) return dns.Extra[i].(*OPT)
@ -157,17 +157,93 @@ func (dns *Msg) IsEdns0() *OPT {
return nil return nil
} }
// popEdns0 is like IsEdns0, but it removes the record from the message.
func (dns *Msg) popEdns0() *OPT {
// RFC 6891, Section 6.1.1 allows the OPT record to appear
// anywhere in the additional record section, but it's usually at
// the end so start there.
for i := len(dns.Extra) - 1; i >= 0; i-- {
if dns.Extra[i].Header().Rrtype == TypeOPT {
opt := dns.Extra[i].(*OPT)
dns.Extra = append(dns.Extra[:i], dns.Extra[i+1:]...)
return opt
}
}
return nil
}
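A usage sketch (illustration only, not part of this diff) pairing SetEdns0 with IsEdns0, which scans the additional section from the end:

func queryWithEdns(name string) *dns.Msg {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn(name), dns.TypeSOA)
	m.SetEdns0(4096, true) // appends an OPT RR to the additional section
	return m
}

func requestedUDPSize(m *dns.Msg) uint16 {
	if opt := m.IsEdns0(); opt != nil {
		return opt.UDPSize()
	}
	return uint16(dns.MinMsgSize)
}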
// IsDomainName checks if s is a valid domain name, it returns the number of // IsDomainName checks if s is a valid domain name, it returns the number of
// labels and true, when a domain name is valid. Note that non fully qualified // labels and true, when a domain name is valid. Note that non fully qualified
// domain name is considered valid, in this case the last label is counted in // domain name is considered valid, in this case the last label is counted in
// the number of labels. When false is returned the number of labels is not // the number of labels. When false is returned the number of labels is not
// defined. Also note that this function is extremely liberal; almost any // defined. Also note that this function is extremely liberal; almost any
// string is a valid domain name as the DNS is 8 bit protocol. It checks if each // string is a valid domain name as the DNS is 8 bit protocol. It checks if each
// label fits in 63 characters, but there is no length check for the entire // label fits in 63 characters and that the entire name will fit into the 255
// string s. I.e. a domain name longer than 255 characters is considered valid. // octet wire format limit.
func IsDomainName(s string) (labels int, ok bool) { func IsDomainName(s string) (labels int, ok bool) {
_, labels, err := packDomainName(s, nil, 0, nil, false) // XXX: The logic in this function was copied from packDomainName and
return labels, err == nil // should be kept in sync with that function.
const lenmsg = 256
if len(s) == 0 { // Ok, for instance when dealing with update RR without any rdata.
return 0, false
}
s = Fqdn(s)
// Each dot ends a segment of the name. Except for escaped dots (\.), which
// are normal dots.
var (
off int
begin int
wasDot bool
)
for i := 0; i < len(s); i++ {
switch s[i] {
case '\\':
if off+1 > lenmsg {
return labels, false
}
// check for \DDD
if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
i += 3
begin += 3
} else {
i++
begin++
}
wasDot = false
case '.':
if wasDot {
// two dots back to back is not legal
return labels, false
}
wasDot = true
labelLen := i - begin
if labelLen >= 1<<6 { // top two bits of length must be clear
return labels, false
}
// off can already (we're in a loop) be bigger than lenmsg
// this happens when a name isn't fully qualified
off += 1 + labelLen
if off > lenmsg {
return labels, false
}
labels++
begin = i + 1
default:
wasDot = false
}
}
return labels, true
} }
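A small sketch of how IsDomainName behaves per the rules above (label length, total wire length, no empty labels); the sample names are illustrative:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// IsDomainName is liberal: it only checks label and total wire length.
	for _, s := range []string{"miek.nl.", "www.example.org", "a..b."} {
		labels, ok := dns.IsDomainName(s)
		fmt.Printf("%q -> labels=%d ok=%v\n", s, labels, ok)
	}
}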
// IsSubDomain checks if child is indeed a child of the parent. If child and parent // IsSubDomain checks if child is indeed a child of the parent. If child and parent
@ -181,7 +257,7 @@ func IsSubDomain(parent, child string) bool {
// The checking is performed on the binary payload. // The checking is performed on the binary payload.
func IsMsg(buf []byte) error { func IsMsg(buf []byte) error {
// Header // Header
if len(buf) < 12 { if len(buf) < headerSize {
return errors.New("dns: bad message header") return errors.New("dns: bad message header")
} }
// Header: Opcode // Header: Opcode
@ -191,11 +267,18 @@ func IsMsg(buf []byte) error {
// IsFqdn checks if a domain name is fully qualified. // IsFqdn checks if a domain name is fully qualified.
func IsFqdn(s string) bool { func IsFqdn(s string) bool {
l := len(s) s2 := strings.TrimSuffix(s, ".")
if l == 0 { if s == s2 {
return false return false
} }
return s[l-1] == '.'
i := strings.LastIndexFunc(s2, func(r rune) bool {
return r != '\\'
})
// Test whether we have an even number of escape sequences before
// the dot or none.
return (len(s2)-i)%2 != 0
} }
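The escape handling in the rewritten IsFqdn can be seen with a few illustrative inputs (a minimal sketch, not part of the library):

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.IsFqdn("example.org."))   // true: ends in an unescaped dot
	fmt.Println(dns.IsFqdn("example.org"))    // false: no trailing dot
	fmt.Println(dns.IsFqdn(`example.org\.`))  // false: the final dot is escaped
	fmt.Println(dns.IsFqdn(`example.org\\.`)) // true: the backslash itself is escaped
}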
// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. // IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181.
@ -244,12 +327,19 @@ func ReverseAddr(addr string) (arpa string, err error) {
if ip == nil { if ip == nil {
return "", &Error{err: "unrecognized address: " + addr} return "", &Error{err: "unrecognized address: " + addr}
} }
if ip.To4() != nil { if v4 := ip.To4(); v4 != nil {
return strconv.Itoa(int(ip[15])) + "." + strconv.Itoa(int(ip[14])) + "." + strconv.Itoa(int(ip[13])) + "." + buf := make([]byte, 0, net.IPv4len*4+len("in-addr.arpa."))
strconv.Itoa(int(ip[12])) + ".in-addr.arpa.", nil // Add it, in reverse, to the buffer
for i := len(v4) - 1; i >= 0; i-- {
buf = strconv.AppendInt(buf, int64(v4[i]), 10)
buf = append(buf, '.')
}
// Append "in-addr.arpa." and return (buf already has the final .)
buf = append(buf, "in-addr.arpa."...)
return string(buf), nil
} }
// Must be IPv6 // Must be IPv6
buf := make([]byte, 0, len(ip)*4+len("ip6.arpa.")) buf := make([]byte, 0, net.IPv6len*4+len("ip6.arpa."))
// Add it, in reverse, to the buffer // Add it, in reverse, to the buffer
for i := len(ip) - 1; i >= 0; i-- { for i := len(ip) - 1; i >= 0; i-- {
v := ip[i] v := ip[i]

61
vendor/github.com/miekg/dns/dns.go generated vendored

@ -34,10 +34,30 @@ type RR interface {
// copy returns a copy of the RR // copy returns a copy of the RR
copy() RR copy() RR
// len returns the length (in octets) of the uncompressed RR in wire format.
len() int // len returns the length (in octets) of the compressed or uncompressed RR in wire format.
// pack packs an RR into wire format. //
pack([]byte, int, map[string]int, bool) (int, error) // If compression is nil, the uncompressed size will be returned, otherwise the compressed
// size will be returned and domain names will be added to the map for future compression.
len(off int, compression map[string]struct{}) int
// pack packs the records RDATA into wire format. The header will
// already have been packed into msg.
pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error)
// unpack unpacks an RR from wire format.
//
// This will only be called on a new and empty RR type with only the header populated. It
// will only be called if the record's RDATA is non-empty.
unpack(msg []byte, off int) (off1 int, err error)
// parse parses an RR from zone file format.
//
// This will only be called on a new and empty RR type with only the header populated.
parse(c *zlexer, origin, file string) *ParseError
// isDuplicate returns whether the two RRs are duplicates.
isDuplicate(r2 RR) bool
} }
// RR_Header is the header all DNS resource records share. // RR_Header is the header all DNS resource records share.
@ -70,28 +90,45 @@ func (h *RR_Header) String() string {
return s return s
} }
func (h *RR_Header) len() int { func (h *RR_Header) len(off int, compression map[string]struct{}) int {
l := len(h.Name) + 1 l := domainNameLen(h.Name, off, compression, true)
l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2) l += 10 // rrtype(2) + class(2) + ttl(4) + rdlength(2)
return l return l
} }
func (h *RR_Header) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
// RR_Header has no RDATA to pack.
return off, nil
}
func (h *RR_Header) unpack(msg []byte, off int) (int, error) {
panic("dns: internal error: unpack should never be called on RR_Header")
}
func (h *RR_Header) parse(c *zlexer, origin, file string) *ParseError {
panic("dns: internal error: parse should never be called on RR_Header")
}
// ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597. // ToRFC3597 converts a known RR to the unknown RR representation from RFC 3597.
func (rr *RFC3597) ToRFC3597(r RR) error { func (rr *RFC3597) ToRFC3597(r RR) error {
buf := make([]byte, r.len()*2) buf := make([]byte, Len(r)*2)
off, err := PackRR(r, buf, 0, nil, false) headerEnd, off, err := packRR(r, buf, 0, compressionMap{}, false)
if err != nil { if err != nil {
return err return err
} }
buf = buf[:off] buf = buf[:off]
if int(r.Header().Rdlength) > off {
return ErrBuf *rr = RFC3597{Hdr: *r.Header()}
rr.Hdr.Rdlength = uint16(off - headerEnd)
if noRdata(rr.Hdr) {
return nil
} }
rfc3597, _, err := unpackRFC3597(*r.Header(), buf, off-int(r.Header().Rdlength)) _, err = rr.unpack(buf, headerEnd)
if err != nil { if err != nil {
return err return err
} }
*rr = *rfc3597.(*RFC3597)
return nil return nil
} }
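A hedged sketch of ToRFC3597 in use, assuming the package's usual NewRR zone-file parser; the TXT record is an arbitrary example:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("example.org. 3600 IN TXT \"hello\"")
	if err != nil {
		panic(err)
	}

	// Re-encode the known record as an unknown (RFC 3597) record.
	unknown := new(dns.RFC3597)
	if err := unknown.ToRFC3597(rr); err != nil {
		panic(err)
	}
	fmt.Println(unknown) // example.org. 3600 CLASS1 TYPE16 \# ...
}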


@ -67,9 +67,6 @@ var AlgorithmToString = map[uint8]string{
PRIVATEOID: "PRIVATEOID", PRIVATEOID: "PRIVATEOID",
} }
// StringToAlgorithm is the reverse of AlgorithmToString.
var StringToAlgorithm = reverseInt8(AlgorithmToString)
// AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's. // AlgorithmToHash is a map of algorithm crypto hash IDs to crypto.Hash's.
var AlgorithmToHash = map[uint8]crypto.Hash{ var AlgorithmToHash = map[uint8]crypto.Hash{
RSAMD5: crypto.MD5, // Deprecated in RFC 6725 RSAMD5: crypto.MD5, // Deprecated in RFC 6725
@ -102,9 +99,6 @@ var HashToString = map[uint8]string{
SHA512: "SHA512", SHA512: "SHA512",
} }
// StringToHash is a map of names to hash IDs.
var StringToHash = reverseInt8(HashToString)
// DNSKEY flag values. // DNSKEY flag values.
const ( const (
SEP = 1 SEP = 1
@ -268,16 +262,17 @@ func (rr *RRSIG) Sign(k crypto.Signer, rrset []RR) error {
return ErrKey return ErrKey
} }
h0 := rrset[0].Header()
rr.Hdr.Rrtype = TypeRRSIG rr.Hdr.Rrtype = TypeRRSIG
rr.Hdr.Name = rrset[0].Header().Name rr.Hdr.Name = h0.Name
rr.Hdr.Class = rrset[0].Header().Class rr.Hdr.Class = h0.Class
if rr.OrigTtl == 0 { // If set don't override if rr.OrigTtl == 0 { // If set don't override
rr.OrigTtl = rrset[0].Header().Ttl rr.OrigTtl = h0.Ttl
} }
rr.TypeCovered = rrset[0].Header().Rrtype rr.TypeCovered = h0.Rrtype
rr.Labels = uint8(CountLabel(rrset[0].Header().Name)) rr.Labels = uint8(CountLabel(h0.Name))
if strings.HasPrefix(rrset[0].Header().Name, "*") { if strings.HasPrefix(h0.Name, "*") {
rr.Labels-- // wildcard, remove from label count rr.Labels-- // wildcard, remove from label count
} }
@ -411,10 +406,7 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error {
// IsRRset checked that we have at least one RR and that the RRs in // IsRRset checked that we have at least one RR and that the RRs in
// the set have consistent type, class, and name. Also check that type and // the set have consistent type, class, and name. Also check that type and
// class matches the RRSIG record. // class matches the RRSIG record.
if rrset[0].Header().Class != rr.Hdr.Class { if h0 := rrset[0].Header(); h0.Class != rr.Hdr.Class || h0.Rrtype != rr.TypeCovered {
return ErrRRset
}
if rrset[0].Header().Rrtype != rr.TypeCovered {
return ErrRRset return ErrRRset
} }
@ -563,20 +555,19 @@ func (k *DNSKEY) publicKeyRSA() *rsa.PublicKey {
pubkey := new(rsa.PublicKey) pubkey := new(rsa.PublicKey)
expo := uint64(0) var expo uint64
for i := 0; i < int(explen); i++ { // The exponent of length explen is between keyoff and modoff.
for _, v := range keybuf[keyoff:modoff] {
expo <<= 8 expo <<= 8
expo |= uint64(keybuf[keyoff+i]) expo |= uint64(v)
} }
if expo > 1<<31-1 { if expo > 1<<31-1 {
// Larger exponent than supported by the crypto package. // Larger exponent than supported by the crypto package.
return nil return nil
} }
pubkey.E = int(expo) pubkey.E = int(expo)
pubkey.N = new(big.Int).SetBytes(keybuf[modoff:])
pubkey.N = big.NewInt(0)
pubkey.N.SetBytes(keybuf[modoff:])
return pubkey return pubkey
} }
@ -601,10 +592,8 @@ func (k *DNSKEY) publicKeyECDSA() *ecdsa.PublicKey {
return nil return nil
} }
} }
pubkey.X = big.NewInt(0) pubkey.X = new(big.Int).SetBytes(keybuf[:len(keybuf)/2])
pubkey.X.SetBytes(keybuf[:len(keybuf)/2]) pubkey.Y = new(big.Int).SetBytes(keybuf[len(keybuf)/2:])
pubkey.Y = big.NewInt(0)
pubkey.Y.SetBytes(keybuf[len(keybuf)/2:])
return pubkey return pubkey
} }
@ -625,10 +614,10 @@ func (k *DNSKEY) publicKeyDSA() *dsa.PublicKey {
p, keybuf := keybuf[:size], keybuf[size:] p, keybuf := keybuf[:size], keybuf[size:]
g, y := keybuf[:size], keybuf[size:] g, y := keybuf[:size], keybuf[size:]
pubkey := new(dsa.PublicKey) pubkey := new(dsa.PublicKey)
pubkey.Parameters.Q = big.NewInt(0).SetBytes(q) pubkey.Parameters.Q = new(big.Int).SetBytes(q)
pubkey.Parameters.P = big.NewInt(0).SetBytes(p) pubkey.Parameters.P = new(big.Int).SetBytes(p)
pubkey.Parameters.G = big.NewInt(0).SetBytes(g) pubkey.Parameters.G = new(big.Int).SetBytes(g)
pubkey.Y = big.NewInt(0).SetBytes(y) pubkey.Y = new(big.Int).SetBytes(y)
return pubkey return pubkey
} }
@ -658,15 +647,16 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
wires := make(wireSlice, len(rrset)) wires := make(wireSlice, len(rrset))
for i, r := range rrset { for i, r := range rrset {
r1 := r.copy() r1 := r.copy()
r1.Header().Ttl = s.OrigTtl h := r1.Header()
labels := SplitDomainName(r1.Header().Name) h.Ttl = s.OrigTtl
labels := SplitDomainName(h.Name)
// 6.2. Canonical RR Form. (4) - wildcards // 6.2. Canonical RR Form. (4) - wildcards
if len(labels) > int(s.Labels) { if len(labels) > int(s.Labels) {
// Wildcard // Wildcard
r1.Header().Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "." h.Name = "*." + strings.Join(labels[len(labels)-int(s.Labels):], ".") + "."
} }
// RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase // RFC 4034: 6.2. Canonical RR Form. (2) - domain name to lowercase
r1.Header().Name = strings.ToLower(r1.Header().Name) h.Name = strings.ToLower(h.Name)
// 6.2. Canonical RR Form. (3) - domain rdata to lowercase. // 6.2. Canonical RR Form. (3) - domain rdata to lowercase.
// NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR, // NS, MD, MF, CNAME, SOA, MB, MG, MR, PTR,
// HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX, // HINFO, MINFO, MX, RP, AFSDB, RT, SIG, PX, NXT, NAPTR, KX,
@ -724,7 +714,7 @@ func rawSignatureData(rrset []RR, s *RRSIG) (buf []byte, err error) {
x.Target = strings.ToLower(x.Target) x.Target = strings.ToLower(x.Target)
} }
// 6.2. Canonical RR Form. (5) - origTTL // 6.2. Canonical RR Form. (5) - origTTL
wire := make([]byte, r1.len()+1) // +1 to be safe(r) wire := make([]byte, Len(r1)+1) // +1 to be safe(r)
off, err1 := PackRR(r1, wire, 0, nil, false) off, err1 := PackRR(r1, wire, 0, nil, false)
if err1 != nil { if err1 != nil {
return nil, err1 return nil, err1


@ -109,21 +109,16 @@ func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
} }
switch k { switch k {
case "modulus": case "modulus":
p.PublicKey.N = big.NewInt(0) p.PublicKey.N = new(big.Int).SetBytes(v1)
p.PublicKey.N.SetBytes(v1)
case "publicexponent": case "publicexponent":
i := big.NewInt(0) i := new(big.Int).SetBytes(v1)
i.SetBytes(v1)
p.PublicKey.E = int(i.Int64()) // int64 should be large enough p.PublicKey.E = int(i.Int64()) // int64 should be large enough
case "privateexponent": case "privateexponent":
p.D = big.NewInt(0) p.D = new(big.Int).SetBytes(v1)
p.D.SetBytes(v1)
case "prime1": case "prime1":
p.Primes[0] = big.NewInt(0) p.Primes[0] = new(big.Int).SetBytes(v1)
p.Primes[0].SetBytes(v1)
case "prime2": case "prime2":
p.Primes[1] = big.NewInt(0) p.Primes[1] = new(big.Int).SetBytes(v1)
p.Primes[1].SetBytes(v1)
} }
case "exponent1", "exponent2", "coefficient": case "exponent1", "exponent2", "coefficient":
// not used in Go (yet) // not used in Go (yet)
@ -136,7 +131,7 @@ func readPrivateKeyRSA(m map[string]string) (*rsa.PrivateKey, error) {
func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) { func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) {
p := new(dsa.PrivateKey) p := new(dsa.PrivateKey)
p.X = big.NewInt(0) p.X = new(big.Int)
for k, v := range m { for k, v := range m {
switch k { switch k {
case "private_value(x)": case "private_value(x)":
@ -154,7 +149,7 @@ func readPrivateKeyDSA(m map[string]string) (*dsa.PrivateKey, error) {
func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) { func readPrivateKeyECDSA(m map[string]string) (*ecdsa.PrivateKey, error) {
p := new(ecdsa.PrivateKey) p := new(ecdsa.PrivateKey)
p.D = big.NewInt(0) p.D = new(big.Int)
// TODO: validate that the required flags are present // TODO: validate that the required flags are present
for k, v := range m { for k, v := range m {
switch k { switch k {
@ -322,6 +317,11 @@ func (kl *klexer) Next() (lex, bool) {
commt = false commt = false
} }
if kl.key && str.Len() == 0 {
// ignore empty lines
break
}
kl.key = true kl.key = true
l.value = zValue l.value = zValue


@ -13,6 +13,8 @@ import (
const format = "Private-key-format: v1.3\n" const format = "Private-key-format: v1.3\n"
var bigIntOne = big.NewInt(1)
// PrivateKeyString converts a PrivateKey to a string. This string has the same // PrivateKeyString converts a PrivateKey to a string. This string has the same
// format as the private-key-file of BIND9 (Private-key-format: v1.3). // format as the private-key-file of BIND9 (Private-key-format: v1.3).
// It needs some info from the key (the algorithm), so it's a method of the DNSKEY // It needs some info from the key (the algorithm), so it's a method of the DNSKEY
@ -31,12 +33,11 @@ func (r *DNSKEY) PrivateKeyString(p crypto.PrivateKey) string {
prime2 := toBase64(p.Primes[1].Bytes()) prime2 := toBase64(p.Primes[1].Bytes())
// Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm // Calculate Exponent1/2 and Coefficient as per: http://en.wikipedia.org/wiki/RSA#Using_the_Chinese_remainder_algorithm
// and from: http://code.google.com/p/go/issues/detail?id=987 // and from: http://code.google.com/p/go/issues/detail?id=987
one := big.NewInt(1) p1 := new(big.Int).Sub(p.Primes[0], bigIntOne)
p1 := big.NewInt(0).Sub(p.Primes[0], one) q1 := new(big.Int).Sub(p.Primes[1], bigIntOne)
q1 := big.NewInt(0).Sub(p.Primes[1], one) exp1 := new(big.Int).Mod(p.D, p1)
exp1 := big.NewInt(0).Mod(p.D, p1) exp2 := new(big.Int).Mod(p.D, q1)
exp2 := big.NewInt(0).Mod(p.D, q1) coeff := new(big.Int).ModInverse(p.Primes[1], p.Primes[0])
coeff := big.NewInt(0).ModInverse(p.Primes[1], p.Primes[0])
exponent1 := toBase64(exp1.Bytes()) exponent1 := toBase64(exp1.Bytes())
exponent2 := toBase64(exp2.Bytes()) exponent2 := toBase64(exp2.Bytes())


@ -7,19 +7,32 @@ package dns
// is so, otherwise false. // is so, otherwise false.
// It is a protocol violation to have identical RRs in a message. // It is a protocol violation to have identical RRs in a message.
func IsDuplicate(r1, r2 RR) bool { func IsDuplicate(r1, r2 RR) bool {
if r1.Header().Class != r2.Header().Class { // Check whether the record header is identical.
if !r1.Header().isDuplicate(r2.Header()) {
return false return false
} }
if r1.Header().Rrtype != r2.Header().Rrtype {
// Check whether the RDATA is identical.
return r1.isDuplicate(r2)
}
func (r1 *RR_Header) isDuplicate(_r2 RR) bool {
r2, ok := _r2.(*RR_Header)
if !ok {
return false return false
} }
if !isDulicateName(r1.Header().Name, r2.Header().Name) { if r1.Class != r2.Class {
return false
}
if r1.Rrtype != r2.Rrtype {
return false
}
if !isDuplicateName(r1.Name, r2.Name) {
return false return false
} }
// ignore TTL // ignore TTL
return true
return isDuplicateRdata(r1, r2)
} }
// isDulicateName checks if the domain names s1 and s2 are equal. // isDuplicateName checks if the domain names s1 and s2 are equal.
func isDulicateName(s1, s2 string) bool { return equal(s1, s2) } func isDuplicateName(s1, s2 string) bool { return equal(s1, s2) }
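A small usage sketch of IsDuplicate reflecting the rules above (TTL is ignored, owner names compare case-insensitively); the records are illustrative:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	r1, _ := dns.NewRR("example.org. 300 IN A 192.0.2.1")
	r2, _ := dns.NewRR("EXAMPLE.ORG. 600 IN A 192.0.2.1") // same owner, different case and TTL
	r3, _ := dns.NewRR("example.org. 300 IN A 192.0.2.2")

	fmt.Println(dns.IsDuplicate(r1, r2)) // true: TTL ignored, names case-insensitive
	fmt.Println(dns.IsDuplicate(r1, r3)) // false: RDATA differs
}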


@ -57,10 +57,7 @@ func main() {
continue continue
} }
if name == "PrivateRR" || name == "RFC3597" { if name == "PrivateRR" || name == "OPT" {
continue
}
if name == "OPT" || name == "ANY" || name == "IXFR" || name == "AXFR" {
continue continue
} }
@ -70,22 +67,6 @@ func main() {
b := &bytes.Buffer{} b := &bytes.Buffer{}
b.WriteString(packageHdr) b.WriteString(packageHdr)
// Generate the giant switch that calls the correct function for each type.
fmt.Fprint(b, "// isDuplicateRdata calls the rdata specific functions\n")
fmt.Fprint(b, "func isDuplicateRdata(r1, r2 RR) bool {\n")
fmt.Fprint(b, "switch r1.Header().Rrtype {\n")
for _, name := range namedTypes {
o := scope.Lookup(name)
_, isEmbedded := getTypeStruct(o.Type(), scope)
if isEmbedded {
continue
}
fmt.Fprintf(b, "case Type%s:\nreturn isDuplicate%s(r1.(*%s), r2.(*%s))\n", name, name, name, name)
}
fmt.Fprintf(b, "}\nreturn false\n}\n")
// Generate the duplicate check for each type. // Generate the duplicate check for each type.
fmt.Fprint(b, "// isDuplicate() functions\n\n") fmt.Fprint(b, "// isDuplicate() functions\n\n")
for _, name := range namedTypes { for _, name := range namedTypes {
@ -95,7 +76,10 @@ func main() {
if isEmbedded { if isEmbedded {
continue continue
} }
fmt.Fprintf(b, "func isDuplicate%s(r1, r2 *%s) bool {\n", name, name) fmt.Fprintf(b, "func (r1 *%s) isDuplicate(_r2 RR) bool {\n", name)
fmt.Fprintf(b, "r2, ok := _r2.(*%s)\n", name)
fmt.Fprint(b, "if !ok { return false }\n")
fmt.Fprint(b, "_ = r2\n")
for i := 1; i < st.NumFields(); i++ { for i := 1; i < st.NumFields(); i++ {
field := st.Field(i).Name() field := st.Field(i).Name()
o2 := func(s string) { fmt.Fprintf(b, s+"\n", field, field) } o2 := func(s string) { fmt.Fprintf(b, s+"\n", field, field) }
@ -103,12 +87,12 @@ func main() {
// For some reason, a and aaaa don't pop up as *types.Slice here (most likely because they are // For some reason, a and aaaa don't pop up as *types.Slice here (most likely because they are
// *indirectly* defined as a slice in the net package). // *indirectly* defined as a slice in the net package).
if _, ok := st.Field(i).Type().(*types.Slice); ok || st.Tag(i) == `dns:"a"` || st.Tag(i) == `dns:"aaaa"` { if _, ok := st.Field(i).Type().(*types.Slice); ok {
o2("if len(r1.%s) != len(r2.%s) {\nreturn false\n}") o2("if len(r1.%s) != len(r2.%s) {\nreturn false\n}")
if st.Tag(i) == `dns:"cdomain-name"` || st.Tag(i) == `dns:"domain-name"` { if st.Tag(i) == `dns:"cdomain-name"` || st.Tag(i) == `dns:"domain-name"` {
o3(`for i := 0; i < len(r1.%s); i++ { o3(`for i := 0; i < len(r1.%s); i++ {
if !isDulicateName(r1.%s[i], r2.%s[i]) { if !isDuplicateName(r1.%s[i], r2.%s[i]) {
return false return false
} }
}`) }`)
@ -128,8 +112,10 @@ func main() {
switch st.Tag(i) { switch st.Tag(i) {
case `dns:"-"`: case `dns:"-"`:
// ignored // ignored
case `dns:"a"`, `dns:"aaaa"`:
o2("if !r1.%s.Equal(r2.%s) {\nreturn false\n}")
case `dns:"cdomain-name"`, `dns:"domain-name"`: case `dns:"cdomain-name"`, `dns:"domain-name"`:
o2("if !isDulicateName(r1.%s, r2.%s) {\nreturn false\n}") o2("if !isDuplicateName(r1.%s, r2.%s) {\nreturn false\n}")
default: default:
o2("if r1.%s != r2.%s {\nreturn false\n}") o2("if r1.%s != r2.%s {\nreturn false\n}")
} }

74
vendor/github.com/miekg/dns/edns.go generated vendored

@ -78,16 +78,22 @@ func (rr *OPT) String() string {
return s return s
} }
func (rr *OPT) len() int { func (rr *OPT) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
for i := 0; i < len(rr.Option); i++ { for _, o := range rr.Option {
l += 4 // Account for 2-byte option code and 2-byte option length. l += 4 // Account for 2-byte option code and 2-byte option length.
lo, _ := rr.Option[i].pack() lo, _ := o.pack()
l += len(lo) l += len(lo)
} }
return l return l
} }
func (rr *OPT) parse(c *zlexer, origin, file string) *ParseError {
panic("dns: internal error: parse should never be called on OPT")
}
func (r1 *OPT) isDuplicate(r2 RR) bool { return false }
// return the old value -> delete SetVersion? // return the old value -> delete SetVersion?
// Version returns the EDNS version used. Only zero is defined. // Version returns the EDNS version used. Only zero is defined.
@ -153,6 +159,8 @@ type EDNS0 interface {
unpack([]byte) error unpack([]byte) error
// String returns the string representation of the option. // String returns the string representation of the option.
String() string String() string
// copy returns a deep-copy of the option.
copy() EDNS0
} }
// EDNS0_NSID option is used to retrieve a nameserver // EDNS0_NSID option is used to retrieve a nameserver
@ -183,7 +191,8 @@ func (e *EDNS0_NSID) pack() ([]byte, error) {
// Option implements the EDNS0 interface. // Option implements the EDNS0 interface.
func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code. func (e *EDNS0_NSID) Option() uint16 { return EDNS0NSID } // Option returns the option code.
func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil } func (e *EDNS0_NSID) unpack(b []byte) error { e.Nsid = hex.EncodeToString(b); return nil }
func (e *EDNS0_NSID) String() string { return string(e.Nsid) } func (e *EDNS0_NSID) String() string { return e.Nsid }
func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} }
// EDNS0_SUBNET is the subnet option that is used to give the remote nameserver // EDNS0_SUBNET is the subnet option that is used to give the remote nameserver
// an idea of where the client lives. See RFC 7871. It can then give back a different // an idea of where the client lives. See RFC 7871. It can then give back a different
@ -301,6 +310,16 @@ func (e *EDNS0_SUBNET) String() (s string) {
return return
} }
func (e *EDNS0_SUBNET) copy() EDNS0 {
return &EDNS0_SUBNET{
e.Code,
e.Family,
e.SourceNetmask,
e.SourceScope,
e.Address,
}
}
// The EDNS0_COOKIE option is used to add a DNS Cookie to a message. // The EDNS0_COOKIE option is used to add a DNS Cookie to a message.
// //
// o := new(dns.OPT) // o := new(dns.OPT)
@ -336,6 +355,7 @@ func (e *EDNS0_COOKIE) pack() ([]byte, error) {
func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE } func (e *EDNS0_COOKIE) Option() uint16 { return EDNS0COOKIE }
func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil } func (e *EDNS0_COOKIE) unpack(b []byte) error { e.Cookie = hex.EncodeToString(b); return nil }
func (e *EDNS0_COOKIE) String() string { return e.Cookie } func (e *EDNS0_COOKIE) String() string { return e.Cookie }
func (e *EDNS0_COOKIE) copy() EDNS0 { return &EDNS0_COOKIE{e.Code, e.Cookie} }
// The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set // The EDNS0_UL (Update Lease) (draft RFC) option is used to tell the server to set
// an expiration on an update RR. This is helpful for clients that cannot clean // an expiration on an update RR. This is helpful for clients that cannot clean
@ -357,6 +377,7 @@ type EDNS0_UL struct {
// Option implements the EDNS0 interface. // Option implements the EDNS0 interface.
func (e *EDNS0_UL) Option() uint16 { return EDNS0UL } func (e *EDNS0_UL) Option() uint16 { return EDNS0UL }
func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) } func (e *EDNS0_UL) String() string { return strconv.FormatUint(uint64(e.Lease), 10) }
func (e *EDNS0_UL) copy() EDNS0 { return &EDNS0_UL{e.Code, e.Lease} }
// Copied: http://golang.org/src/pkg/net/dnsmsg.go // Copied: http://golang.org/src/pkg/net/dnsmsg.go
func (e *EDNS0_UL) pack() ([]byte, error) { func (e *EDNS0_UL) pack() ([]byte, error) {
@ -411,10 +432,13 @@ func (e *EDNS0_LLQ) unpack(b []byte) error {
func (e *EDNS0_LLQ) String() string { func (e *EDNS0_LLQ) String() string {
s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) + s := strconv.FormatUint(uint64(e.Version), 10) + " " + strconv.FormatUint(uint64(e.Opcode), 10) +
" " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(uint64(e.Id), 10) + " " + strconv.FormatUint(uint64(e.Error), 10) + " " + strconv.FormatUint(e.Id, 10) +
" " + strconv.FormatUint(uint64(e.LeaseLife), 10) " " + strconv.FormatUint(uint64(e.LeaseLife), 10)
return s return s
} }
func (e *EDNS0_LLQ) copy() EDNS0 {
return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife}
}
// EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975. // EDNS0_DAU implements the EDNS0 "DNSSEC Algorithm Understood" option. See RFC 6975.
type EDNS0_DAU struct { type EDNS0_DAU struct {
@ -429,15 +453,16 @@ func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_DAU) String() string { func (e *EDNS0_DAU) String() string {
s := "" s := ""
for i := 0; i < len(e.AlgCode); i++ { for _, alg := range e.AlgCode {
if a, ok := AlgorithmToString[e.AlgCode[i]]; ok { if a, ok := AlgorithmToString[alg]; ok {
s += " " + a s += " " + a
} else { } else {
s += " " + strconv.Itoa(int(e.AlgCode[i])) s += " " + strconv.Itoa(int(alg))
} }
} }
return s return s
} }
func (e *EDNS0_DAU) copy() EDNS0 { return &EDNS0_DAU{e.Code, e.AlgCode} }
// EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975. // EDNS0_DHU implements the EDNS0 "DS Hash Understood" option. See RFC 6975.
type EDNS0_DHU struct { type EDNS0_DHU struct {
@ -452,15 +477,16 @@ func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_DHU) String() string { func (e *EDNS0_DHU) String() string {
s := "" s := ""
for i := 0; i < len(e.AlgCode); i++ { for _, alg := range e.AlgCode {
if a, ok := HashToString[e.AlgCode[i]]; ok { if a, ok := HashToString[alg]; ok {
s += " " + a s += " " + a
} else { } else {
s += " " + strconv.Itoa(int(e.AlgCode[i])) s += " " + strconv.Itoa(int(alg))
} }
} }
return s return s
} }
func (e *EDNS0_DHU) copy() EDNS0 { return &EDNS0_DHU{e.Code, e.AlgCode} }
// EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975. // EDNS0_N3U implements the EDNS0 "NSEC3 Hash Understood" option. See RFC 6975.
type EDNS0_N3U struct { type EDNS0_N3U struct {
@ -476,15 +502,16 @@ func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil }
func (e *EDNS0_N3U) String() string { func (e *EDNS0_N3U) String() string {
// Re-use the hash map // Re-use the hash map
s := "" s := ""
for i := 0; i < len(e.AlgCode); i++ { for _, alg := range e.AlgCode {
if a, ok := HashToString[e.AlgCode[i]]; ok { if a, ok := HashToString[alg]; ok {
s += " " + a s += " " + a
} else { } else {
s += " " + strconv.Itoa(int(e.AlgCode[i])) s += " " + strconv.Itoa(int(alg))
} }
} }
return s return s
} }
func (e *EDNS0_N3U) copy() EDNS0 { return &EDNS0_N3U{e.Code, e.AlgCode} }
// EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314. // EDNS0_EXPIRE implements the EDNS0 option as described in RFC 7314.
type EDNS0_EXPIRE struct { type EDNS0_EXPIRE struct {
@ -495,13 +522,11 @@ type EDNS0_EXPIRE struct {
// Option implements the EDNS0 interface. // Option implements the EDNS0 interface.
func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE } func (e *EDNS0_EXPIRE) Option() uint16 { return EDNS0EXPIRE }
func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) } func (e *EDNS0_EXPIRE) String() string { return strconv.FormatUint(uint64(e.Expire), 10) }
func (e *EDNS0_EXPIRE) copy() EDNS0 { return &EDNS0_EXPIRE{e.Code, e.Expire} }
func (e *EDNS0_EXPIRE) pack() ([]byte, error) { func (e *EDNS0_EXPIRE) pack() ([]byte, error) {
b := make([]byte, 4) b := make([]byte, 4)
b[0] = byte(e.Expire >> 24) binary.BigEndian.PutUint32(b, e.Expire)
b[1] = byte(e.Expire >> 16)
b[2] = byte(e.Expire >> 8)
b[3] = byte(e.Expire)
return b, nil return b, nil
} }
@ -536,6 +561,11 @@ func (e *EDNS0_LOCAL) Option() uint16 { return e.Code }
func (e *EDNS0_LOCAL) String() string { func (e *EDNS0_LOCAL) String() string {
return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data)
} }
func (e *EDNS0_LOCAL) copy() EDNS0 {
b := make([]byte, len(e.Data))
copy(b, e.Data)
return &EDNS0_LOCAL{e.Code, b}
}
func (e *EDNS0_LOCAL) pack() ([]byte, error) { func (e *EDNS0_LOCAL) pack() ([]byte, error) {
b := make([]byte, len(e.Data)) b := make([]byte, len(e.Data))
@ -608,6 +638,7 @@ func (e *EDNS0_TCP_KEEPALIVE) String() (s string) {
} }
return return
} }
func (e *EDNS0_TCP_KEEPALIVE) copy() EDNS0 { return &EDNS0_TCP_KEEPALIVE{e.Code, e.Length, e.Timeout} }
// EDNS0_PADDING option is used to add padding to a request/response. The default // EDNS0_PADDING option is used to add padding to a request/response. The default
// value of padding SHOULD be 0x0 but other values MAY be used, for instance if // value of padding SHOULD be 0x0 but other values MAY be used, for instance if
@ -621,3 +652,8 @@ func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING }
func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil } func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil }
func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil } func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil }
func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) } func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) }
func (e *EDNS0_PADDING) copy() EDNS0 {
b := make([]byte, len(e.Padding))
copy(b, e.Padding)
return &EDNS0_PADDING{b}
}
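To show what the new copy() methods buy, a minimal sketch that builds an OPT carrying two options and then deep-copies the message; the cookie value and names are made up for illustration:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	o := new(dns.OPT)
	o.Hdr.Name = "."
	o.Hdr.Rrtype = dns.TypeOPT
	o.SetUDPSize(dns.DefaultMsgSize)
	o.Option = append(o.Option,
		&dns.EDNS0_NSID{Code: dns.EDNS0NSID},
		&dns.EDNS0_COOKIE{Code: dns.EDNS0COOKIE, Cookie: "24a5ac1223344556"},
	)

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeSOA)
	m.Extra = append(m.Extra, o)

	// Msg.Copy deep-copies the message; with the copy() methods above each
	// EDNS0 option is duplicated rather than shared with the original.
	c := m.Copy()
	fmt.Println(len(c.IsEdns0().Option)) // 2
}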


@ -20,7 +20,7 @@ func Field(r RR, i int) string {
return "" return ""
} }
d := reflect.ValueOf(r).Elem().Field(i) d := reflect.ValueOf(r).Elem().Field(i)
switch k := d.Kind(); k { switch d.Kind() {
case reflect.String: case reflect.String:
return d.String() return d.String()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
@ -31,6 +31,9 @@ func Field(r RR, i int) string {
switch reflect.ValueOf(r).Elem().Type().Field(i).Tag { switch reflect.ValueOf(r).Elem().Type().Field(i).Tag {
case `dns:"a"`: case `dns:"a"`:
// TODO(miek): Hmm store this as 16 bytes // TODO(miek): Hmm store this as 16 bytes
if d.Len() < net.IPv4len {
return ""
}
if d.Len() < net.IPv6len { if d.Len() < net.IPv6len {
return net.IPv4(byte(d.Index(0).Uint()), return net.IPv4(byte(d.Index(0).Uint()),
byte(d.Index(1).Uint()), byte(d.Index(1).Uint()),
@ -42,6 +45,9 @@ func Field(r RR, i int) string {
byte(d.Index(14).Uint()), byte(d.Index(14).Uint()),
byte(d.Index(15).Uint())).String() byte(d.Index(15).Uint())).String()
case `dns:"aaaa"`: case `dns:"aaaa"`:
if d.Len() < net.IPv6len {
return ""
}
return net.IP{ return net.IP{
byte(d.Index(0).Uint()), byte(d.Index(0).Uint()),
byte(d.Index(1).Uint()), byte(d.Index(1).Uint()),


@ -16,7 +16,7 @@ func SplitDomainName(s string) (labels []string) {
fqdnEnd := 0 // offset of the final '.' or the length of the name fqdnEnd := 0 // offset of the final '.' or the length of the name
idx := Split(s) idx := Split(s)
begin := 0 begin := 0
if s[len(s)-1] == '.' { if IsFqdn(s) {
fqdnEnd = len(s) - 1 fqdnEnd = len(s) - 1
} else { } else {
fqdnEnd = len(s) fqdnEnd = len(s)
@ -28,16 +28,13 @@ func SplitDomainName(s string) (labels []string) {
case 1: case 1:
// no-op // no-op
default: default:
end := 0 for _, end := range idx[1:] {
for i := 1; i < len(idx); i++ {
end = idx[i]
labels = append(labels, s[begin:end-1]) labels = append(labels, s[begin:end-1])
begin = end begin = end
} }
} }
labels = append(labels, s[begin:fqdnEnd]) return append(labels, s[begin:fqdnEnd])
return labels
} }
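A short sketch of SplitDomainName with and without a trailing dot and with an escaped dot; the outputs in the comments follow from the logic above:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	fmt.Println(dns.SplitDomainName("www.example.org."))      // [www example org]
	fmt.Println(dns.SplitDomainName(`www\.test.example.org`)) // [www\.test example org]
	fmt.Println(dns.SplitDomainName("."))                     // [] (the root name has no labels)
}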
// CompareDomainName compares the names s1 and s2 and // CompareDomainName compares the names s1 and s2 and

467
vendor/github.com/miekg/dns/msg.go generated vendored

@ -9,7 +9,6 @@
package dns package dns
//go:generate go run msg_generate.go //go:generate go run msg_generate.go
//go:generate go run compress_generate.go
import ( import (
crand "crypto/rand" crand "crypto/rand"
@ -18,6 +17,7 @@ import (
"math/big" "math/big"
"math/rand" "math/rand"
"strconv" "strconv"
"strings"
"sync" "sync"
) )
@ -36,6 +36,16 @@ const (
// not something a well written implementation should ever do, so we leave them // not something a well written implementation should ever do, so we leave them
// to trip the maximum compression pointer check. // to trip the maximum compression pointer check.
maxCompressionPointers = (maxDomainNameWireOctets+1)/2 - 2 maxCompressionPointers = (maxDomainNameWireOctets+1)/2 - 2
// This is the maximum length of a domain name in presentation format. The
// maximum wire length of a domain name is 255 octets (see above), with the
// maximum label length being 63. The wire format requires one extra byte over
// the presentation format, reducing the number of octets by 1. Each label in
// the name will be separated by a single period, with each octet in the label
// expanding to at most 4 bytes (\DDD). If all other labels are of the maximum
// length, then the final label can only be 61 octets long to not exceed the
// maximum allowed wire length.
maxDomainNamePresentationLength = 61*4 + 1 + 63*4 + 1 + 63*4 + 1 + 63*4 + 1
) )
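A quick check of the arithmetic behind maxDomainNamePresentationLength: the 255-octet wire limit decomposes as 1+63 + 1+63 + 1+63 + 1+61 + 1 = 255, i.e. three 63-octet labels, one 61-octet label and the root label. With every octet escaped as \DDD (4 characters) and a dot after each label, the presentation form is at most 61*4 + 1 + 63*4 + 1 + 63*4 + 1 + 63*4 + 1 = 1004 bytes, which is the value of the constant above.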
// Errors defined in this package. // Errors defined in this package.
@ -180,6 +190,39 @@ var RcodeToString = map[int]string{
RcodeBadCookie: "BADCOOKIE", RcodeBadCookie: "BADCOOKIE",
} }
// compressionMap is used to allow a more efficient compression map
// to be used for internal packDomainName calls without changing the
// signature or functionality of public API.
//
// In particular, map[string]uint16 uses 25% less per-entry memory
// than does map[string]int.
type compressionMap struct {
ext map[string]int // external callers
int map[string]uint16 // internal callers
}
func (m compressionMap) valid() bool {
return m.int != nil || m.ext != nil
}
func (m compressionMap) insert(s string, pos int) {
if m.ext != nil {
m.ext[s] = pos
} else {
m.int[s] = uint16(pos)
}
}
func (m compressionMap) find(s string) (int, bool) {
if m.ext != nil {
pos, ok := m.ext[s]
return pos, ok
}
pos, ok := m.int[s]
return int(pos), ok
}
// Domain names are a sequence of counted strings // Domain names are a sequence of counted strings
// split at the dots. They end with a zero-length string. // split at the dots. They end with a zero-length string.
@ -188,29 +231,21 @@ var RcodeToString = map[int]string{
// map needs to hold a mapping between domain names and offsets // map needs to hold a mapping between domain names and offsets
// pointing into msg. // pointing into msg.
func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { func PackDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
off1, _, err = packDomainName(s, msg, off, compression, compress) return packDomainName(s, msg, off, compressionMap{ext: compression}, compress)
return
} }
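A minimal sketch of the exported PackDomainName/UnpackDomainName pair, sharing a caller-supplied compression map as described above; the buffer size and names are arbitrary:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	msg := make([]byte, 64)
	compression := make(map[string]int)

	// Pack two names that share a suffix; the second reuses the
	// compression pointer recorded for the first.
	off, err := dns.PackDomainName("www.example.org.", msg, 0, compression, true)
	if err != nil {
		panic(err)
	}
	off, err = dns.PackDomainName("ftp.example.org.", msg, off, compression, true)
	if err != nil {
		panic(err)
	}

	name, _, err := dns.UnpackDomainName(msg, 0)
	fmt.Println(name, off, err) // www.example.org. <final offset> <nil>
}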
func packDomainName(s string, msg []byte, off int, compression map[string]int, compress bool) (off1 int, labels int, err error) { func packDomainName(s string, msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {
// special case if msg == nil // XXX: A logical copy of this function exists in IsDomainName and
lenmsg := 256 // should be kept in sync with this function.
if msg != nil {
lenmsg = len(msg)
}
ls := len(s) ls := len(s)
if ls == 0 { // Ok, for instance when dealing with update RR without any rdata. if ls == 0 { // Ok, for instance when dealing with update RR without any rdata.
return off, 0, nil return off, nil
} }
// If not fully qualified, error out, but only if msg != nil #ugly // If not fully qualified, error out.
if s[ls-1] != '.' { if !IsFqdn(s) {
if msg != nil { return len(msg), ErrFqdn
return lenmsg, 0, ErrFqdn
}
s += "."
ls++
} }
// Each dot ends a segment of the name. // Each dot ends a segment of the name.
@ -223,9 +258,11 @@ func packDomainName(s string, msg []byte, off int, compression map[string]int, c
// Emit sequence of counted strings, chopping at dots. // Emit sequence of counted strings, chopping at dots.
var ( var (
begin int begin int
bs []byte compBegin int
wasDot bool compOff int
bs []byte
wasDot bool
) )
loop: loop:
for i := 0; i < ls; i++ { for i := 0; i < ls; i++ {
@ -238,8 +275,8 @@ loop:
switch c { switch c {
case '\\': case '\\':
if off+1 > lenmsg { if off+1 > len(msg) {
return lenmsg, labels, ErrBuf return len(msg), ErrBuf
} }
if bs == nil { if bs == nil {
@ -251,45 +288,37 @@ loop:
bs[i] = dddToByte(bs[i+1:]) bs[i] = dddToByte(bs[i+1:])
copy(bs[i+1:ls-3], bs[i+4:]) copy(bs[i+1:ls-3], bs[i+4:])
ls -= 3 ls -= 3
compOff += 3
} else { } else {
copy(bs[i:ls-1], bs[i+1:]) copy(bs[i:ls-1], bs[i+1:])
ls-- ls--
compOff++
} }
wasDot = false wasDot = false
case '.': case '.':
if wasDot { if wasDot {
// two dots back to back is not legal // two dots back to back is not legal
return lenmsg, labels, ErrRdata return len(msg), ErrRdata
} }
wasDot = true wasDot = true
labelLen := i - begin labelLen := i - begin
if labelLen >= 1<<6 { // top two bits of length must be clear if labelLen >= 1<<6 { // top two bits of length must be clear
return lenmsg, labels, ErrRdata return len(msg), ErrRdata
} }
// off can already (we're in a loop) be bigger than len(msg) // off can already (we're in a loop) be bigger than len(msg)
// this happens when a name isn't fully qualified // this happens when a name isn't fully qualified
if off+1+labelLen > lenmsg { if off+1+labelLen > len(msg) {
return lenmsg, labels, ErrBuf return len(msg), ErrBuf
} }
// Don't try to compress '.' // Don't try to compress '.'
// We should only compress when compress is true, but we should also still pick // We should only compress when compress is true, but we should also still pick
// up names that can be used for *future* compression(s). // up names that can be used for *future* compression(s).
if compression != nil && !isRootLabel(s, bs, begin, ls) { if compression.valid() && !isRootLabel(s, bs, begin, ls) {
var ( if p, ok := compression.find(s[compBegin:]); ok {
p int
ok bool
)
if bs == nil {
p, ok = compression[s[begin:]]
} else {
p, ok = compression[string(bs[begin:ls])]
}
if ok {
// The first hit is the longest matching dname // The first hit is the longest matching dname
// keep the pointer offset we get back and store // keep the pointer offset we get back and store
// the offset of the current name, because that's // the offset of the current name, because that's
@ -302,28 +331,22 @@ loop:
} }
} else if off < maxCompressionOffset { } else if off < maxCompressionOffset {
// Only offsets smaller than maxCompressionOffset can be used. // Only offsets smaller than maxCompressionOffset can be used.
if bs == nil { compression.insert(s[compBegin:], off)
compression[s[begin:]] = off
} else {
compression[string(bs[begin:ls])] = off
}
} }
} }
// The following is covered by the length check above. // The following is covered by the length check above.
if msg != nil { msg[off] = byte(labelLen)
msg[off] = byte(labelLen)
if bs == nil { if bs == nil {
copy(msg[off+1:], s[begin:i]) copy(msg[off+1:], s[begin:i])
} else { } else {
copy(msg[off+1:], bs[begin:i]) copy(msg[off+1:], bs[begin:i])
}
} }
off += 1 + labelLen off += 1 + labelLen
labels++
begin = i + 1 begin = i + 1
compBegin = begin + compOff
default: default:
wasDot = false wasDot = false
} }
@ -331,22 +354,21 @@ loop:
// Root label is special // Root label is special
if isRootLabel(s, bs, 0, ls) { if isRootLabel(s, bs, 0, ls) {
return off, labels, nil return off, nil
} }
// If we did compression and we find something add the pointer here // If we did compression and we find something add the pointer here
if pointer != -1 { if pointer != -1 {
// We have two bytes (14 bits) to put the pointer in // We have two bytes (14 bits) to put the pointer in
// if msg == nil, we will never do compression
binary.BigEndian.PutUint16(msg[off:], uint16(pointer^0xC000)) binary.BigEndian.PutUint16(msg[off:], uint16(pointer^0xC000))
return off + 2, labels, nil return off + 2, nil
} }
if msg != nil && off < lenmsg { if off < len(msg) {
msg[off] = 0 msg[off] = 0
} }
return off + 1, labels, nil return off + 1, nil
} }
// isRootLabel returns whether s or bs, from off to end, is the root // isRootLabel returns whether s or bs, from off to end, is the root
@ -381,7 +403,7 @@ func isRootLabel(s string, bs []byte, off, end int) bool {
// When an error is encountered, the unpacked name will be discarded // When an error is encountered, the unpacked name will be discarded
// and len(msg) will be returned as the offset. // and len(msg) will be returned as the offset.
func UnpackDomainName(msg []byte, off int) (string, int, error) { func UnpackDomainName(msg []byte, off int) (string, int, error) {
s := make([]byte, 0, 64) s := make([]byte, 0, maxDomainNamePresentationLength)
off1 := 0 off1 := 0
lenmsg := len(msg) lenmsg := len(msg)
budget := maxDomainNameWireOctets budget := maxDomainNameWireOctets
@ -407,21 +429,15 @@ Loop:
if budget <= 0 { if budget <= 0 {
return "", lenmsg, ErrLongDomain return "", lenmsg, ErrLongDomain
} }
for j := off; j < off+c; j++ { for _, b := range msg[off : off+c] {
switch b := msg[j]; b { switch b {
case '.', '(', ')', ';', ' ', '@': case '.', '(', ')', ';', ' ', '@':
fallthrough fallthrough
case '"', '\\': case '"', '\\':
s = append(s, '\\', b) s = append(s, '\\', b)
default: default:
if b < 32 || b >= 127 { // unprintable, use \DDD if b < ' ' || b > '~' { // unprintable, use \DDD
var buf [3]byte s = append(s, escapeByte(b)...)
bufs := strconv.AppendInt(buf[:0], int64(b), 10)
s = append(s, '\\')
for i := len(bufs); i < 3; i++ {
s = append(s, '0')
}
s = append(s, bufs...)
} else { } else {
s = append(s, b) s = append(s, b)
} }
@ -473,11 +489,11 @@ func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) {
return offset, nil return offset, nil
} }
var err error var err error
for i := range txt { for _, s := range txt {
if len(txt[i]) > len(tmp) { if len(s) > len(tmp) {
return offset, ErrBuf return offset, ErrBuf
} }
offset, err = packTxtString(txt[i], msg, offset, tmp) offset, err = packTxtString(s, msg, offset, tmp)
if err != nil { if err != nil {
return offset, err return offset, err
} }
@ -591,19 +607,38 @@ func intToBytes(i *big.Int, length int) []byte {
// PackRR packs a resource record rr into msg[off:]. // PackRR packs a resource record rr into msg[off:].
// See PackDomainName for documentation about the compression. // See PackDomainName for documentation about the compression.
func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { func PackRR(rr RR, msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) {
headerEnd, off1, err := packRR(rr, msg, off, compressionMap{ext: compression}, compress)
if err == nil {
// packRR no longer sets the Rdlength field on the rr, but
// callers might be expecting it so we set it here.
rr.Header().Rdlength = uint16(off1 - headerEnd)
}
return off1, err
}
func packRR(rr RR, msg []byte, off int, compression compressionMap, compress bool) (headerEnd int, off1 int, err error) {
if rr == nil { if rr == nil {
return len(msg), &Error{err: "nil rr"} return len(msg), len(msg), &Error{err: "nil rr"}
} }
off1, err = rr.pack(msg, off, compression, compress) headerEnd, err = rr.Header().packHeader(msg, off, compression, compress)
if err != nil { if err != nil {
return len(msg), err return headerEnd, len(msg), err
} }
// TODO(miek): Not sure if this is needed? If removed we can remove rawmsg.go as well.
if rawSetRdlength(msg, off, off1) { off1, err = rr.pack(msg, headerEnd, compression, compress)
return off1, nil if err != nil {
return headerEnd, len(msg), err
} }
return off, ErrRdata
rdlength := off1 - headerEnd
if int(uint16(rdlength)) != rdlength { // overflow
return headerEnd, len(msg), ErrRdata
}
// The RDLENGTH field is the last field in the header and we set it here.
binary.BigEndian.PutUint16(msg[headerEnd-2:], uint16(rdlength))
return headerEnd, off1, nil
} }
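A small sketch of PackRR as seen by callers, including the Rdlength fix-up noted in the comment above; the A record is illustrative:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("example.org. 300 IN A 192.0.2.1")
	if err != nil {
		panic(err)
	}

	buf := make([]byte, dns.Len(rr))
	off, err := dns.PackRR(rr, buf, 0, nil, false)
	if err != nil {
		panic(err)
	}
	// PackRR also fills in rr.Header().Rdlength, as noted above.
	fmt.Println(off, rr.Header().Rdlength) // <bytes written> 4
}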
// UnpackRR unpacks msg[off:] into an RR. // UnpackRR unpacks msg[off:] into an RR.
@ -619,17 +654,28 @@ func UnpackRR(msg []byte, off int) (rr RR, off1 int, err error) {
// UnpackRRWithHeader unpacks the record type specific payload given an existing // UnpackRRWithHeader unpacks the record type specific payload given an existing
// RR_Header. // RR_Header.
func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) { func UnpackRRWithHeader(h RR_Header, msg []byte, off int) (rr RR, off1 int, err error) {
if newFn, ok := TypeToRR[h.Rrtype]; ok {
rr = newFn()
*rr.Header() = h
} else {
rr = &RFC3597{Hdr: h}
}
if noRdata(h) {
return rr, off, nil
}
end := off + int(h.Rdlength) end := off + int(h.Rdlength)
if fn, known := typeToUnpack[h.Rrtype]; !known { off, err = rr.unpack(msg, off)
rr, off, err = unpackRFC3597(h, msg, off) if err != nil {
} else { return nil, end, err
rr, off, err = fn(h, msg, off)
} }
if off != end { if off != end {
return &h, end, &Error{err: "bad rdlength"} return &h, end, &Error{err: "bad rdlength"}
} }
return rr, off, err
return rr, off, nil
} }
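For a whole message, a hedged round-trip sketch through Pack and Unpack, which drive the record unpack path above; the question is arbitrary:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeAAAA)

	wire, err := m.Pack()
	if err != nil {
		panic(err)
	}

	var in dns.Msg
	if err := in.Unpack(wire); err != nil {
		panic(err)
	}
	fmt.Println(in.Question[0].Name, len(wire))
}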
// unpackRRslice unpacks msg[off:] into an []RR. // unpackRRslice unpacks msg[off:] into an []RR.
@ -713,15 +759,15 @@ func (dns *Msg) PackBuffer(buf []byte) (msg []byte, err error) {
// If this message can't be compressed, avoid filling the // If this message can't be compressed, avoid filling the
// compression map and creating garbage. // compression map and creating garbage.
if dns.Compress && dns.isCompressible() { if dns.Compress && dns.isCompressible() {
compression := make(map[string]int) // Compression pointer mappings. compression := make(map[string]uint16) // Compression pointer mappings.
return dns.packBufferWithCompressionMap(buf, compression, true) return dns.packBufferWithCompressionMap(buf, compressionMap{int: compression}, true)
} }
return dns.packBufferWithCompressionMap(buf, nil, false) return dns.packBufferWithCompressionMap(buf, compressionMap{}, false)
} }
// packBufferWithCompressionMap packs a Msg, using the given buffer buf. // packBufferWithCompressionMap packs a Msg, using the given buffer buf.
func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string]int, compress bool) (msg []byte, err error) { func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression compressionMap, compress bool) (msg []byte, err error) {
if dns.Rcode < 0 || dns.Rcode > 0xFFF { if dns.Rcode < 0 || dns.Rcode > 0xFFF {
return nil, ErrRcode return nil, ErrRcode
} }
@ -771,7 +817,7 @@ func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string]
// We need the uncompressed length here, because we first pack it and then compress it. // We need the uncompressed length here, because we first pack it and then compress it.
msg = buf msg = buf
uncompressedLen := compressedLen(dns, false) uncompressedLen := msgLenWithCompressionMap(dns, nil)
if packLen := uncompressedLen + 1; len(msg) < packLen { if packLen := uncompressedLen + 1; len(msg) < packLen {
msg = make([]byte, packLen) msg = make([]byte, packLen)
} }
@ -789,19 +835,19 @@ func (dns *Msg) packBufferWithCompressionMap(buf []byte, compression map[string]
} }
} }
for _, r := range dns.Answer { for _, r := range dns.Answer {
off, err = PackRR(r, msg, off, compression, compress) _, off, err = packRR(r, msg, off, compression, compress)
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
for _, r := range dns.Ns { for _, r := range dns.Ns {
off, err = PackRR(r, msg, off, compression, compress) _, off, err = packRR(r, msg, off, compression, compress)
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
for _, r := range dns.Extra { for _, r := range dns.Extra {
off, err = PackRR(r, msg, off, compression, compress) _, off, err = packRR(r, msg, off, compression, compress)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -888,43 +934,37 @@ func (dns *Msg) String() string {
s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n"
if len(dns.Question) > 0 { if len(dns.Question) > 0 {
s += "\n;; QUESTION SECTION:\n" s += "\n;; QUESTION SECTION:\n"
for i := 0; i < len(dns.Question); i++ { for _, r := range dns.Question {
s += dns.Question[i].String() + "\n" s += r.String() + "\n"
} }
} }
if len(dns.Answer) > 0 { if len(dns.Answer) > 0 {
s += "\n;; ANSWER SECTION:\n" s += "\n;; ANSWER SECTION:\n"
for i := 0; i < len(dns.Answer); i++ { for _, r := range dns.Answer {
if dns.Answer[i] != nil { if r != nil {
s += dns.Answer[i].String() + "\n" s += r.String() + "\n"
} }
} }
} }
if len(dns.Ns) > 0 { if len(dns.Ns) > 0 {
s += "\n;; AUTHORITY SECTION:\n" s += "\n;; AUTHORITY SECTION:\n"
for i := 0; i < len(dns.Ns); i++ { for _, r := range dns.Ns {
if dns.Ns[i] != nil { if r != nil {
s += dns.Ns[i].String() + "\n" s += r.String() + "\n"
} }
} }
} }
if len(dns.Extra) > 0 { if len(dns.Extra) > 0 {
s += "\n;; ADDITIONAL SECTION:\n" s += "\n;; ADDITIONAL SECTION:\n"
for i := 0; i < len(dns.Extra); i++ { for _, r := range dns.Extra {
if dns.Extra[i] != nil { if r != nil {
s += dns.Extra[i].String() + "\n" s += r.String() + "\n"
} }
} }
} }
return s return s
} }
// Len returns the message length when in (un)compressed wire format.
// If dns.Compress is true, compression is taken into account. Len()
// is provided to be a faster way to get the size of the resulting packet,
// than packing it, measuring the size and discarding the buffer.
func (dns *Msg) Len() int { return compressedLen(dns, dns.Compress) }
// isCompressible returns whether the msg may be compressible. // isCompressible returns whether the msg may be compressible.
func (dns *Msg) isCompressible() bool { func (dns *Msg) isCompressible() bool {
// If we only have one question, there is nothing we can ever compress. // If we only have one question, there is nothing we can ever compress.
@ -932,148 +972,110 @@ func (dns *Msg) isCompressible() bool {
len(dns.Ns) > 0 || len(dns.Extra) > 0 len(dns.Ns) > 0 || len(dns.Extra) > 0
} }
func compressedLenWithCompressionMap(dns *Msg, compression map[string]struct{}) int { // Len returns the message length when in (un)compressed wire format.
l := 12 // Message header is always 12 bytes // If dns.Compress is true, compression is taken into account. Len()
for _, r := range dns.Question { // is provided to be a faster way to get the size of the resulting packet,
compressionLenHelper(compression, r.Name, l) // than packing it, measuring the size and discarding the buffer.
l += r.len() func (dns *Msg) Len() int {
}
l += compressionLenSlice(l, compression, dns.Answer)
l += compressionLenSlice(l, compression, dns.Ns)
l += compressionLenSlice(l, compression, dns.Extra)
return l
}
// compressedLen returns the message length when in compressed wire format
// when compress is true, otherwise the uncompressed length is returned.
func compressedLen(dns *Msg, compress bool) int {
// We always return one more than needed.
// If this message can't be compressed, avoid filling the // If this message can't be compressed, avoid filling the
// compression map and creating garbage. // compression map and creating garbage.
if compress && dns.isCompressible() { if dns.Compress && dns.isCompressible() {
compression := make(map[string]struct{}) compression := make(map[string]struct{})
return compressedLenWithCompressionMap(dns, compression) return msgLenWithCompressionMap(dns, compression)
} }
l := 12 // Message header is always 12 bytes return msgLenWithCompressionMap(dns, nil)
}
func msgLenWithCompressionMap(dns *Msg, compression map[string]struct{}) int {
l := headerSize
for _, r := range dns.Question { for _, r := range dns.Question {
l += r.len() l += r.len(l, compression)
} }
for _, r := range dns.Answer { for _, r := range dns.Answer {
if r != nil { if r != nil {
l += r.len() l += r.len(l, compression)
} }
} }
for _, r := range dns.Ns { for _, r := range dns.Ns {
if r != nil { if r != nil {
l += r.len() l += r.len(l, compression)
} }
} }
for _, r := range dns.Extra { for _, r := range dns.Extra {
if r != nil { if r != nil {
l += r.len() l += r.len(l, compression)
} }
} }
return l return l
} }
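A brief sketch comparing Msg.Len with the size of the actually packed message, per the comment above; the query is illustrative:

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeNS)
	m.Compress = true

	// Len estimates the wire size without allocating or packing a buffer.
	estimate := m.Len()

	wire, err := m.Pack()
	if err != nil {
		panic(err)
	}
	// With a single question there is nothing to compress, so the two agree.
	fmt.Println(estimate, len(wire))
}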
func compressionLenSlice(lenp int, c map[string]struct{}, rs []RR) int { func domainNameLen(s string, off int, compression map[string]struct{}, compress bool) int {
initLen := lenp if s == "" || s == "." {
for _, r := range rs { return 1
if r == nil { }
escaped := strings.Contains(s, "\\")
if compression != nil && (compress || off < maxCompressionOffset) {
// compressionLenSearch will insert the entry into the compression
// map if it doesn't contain it.
if l, ok := compressionLenSearch(compression, s, off); ok && compress {
if escaped {
return escapedNameLen(s[:l]) + 2
}
return l + 2
}
}
if escaped {
return escapedNameLen(s) + 1
}
return len(s) + 1
}
func escapedNameLen(s string) int {
nameLen := len(s)
for i := 0; i < len(s); i++ {
if s[i] != '\\' {
continue continue
} }
// TmpLen is to track len of record at 14-bit boundaries
tmpLen := lenp
x := r.len() if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) {
// track this length, and the global length in len, while taking compression into account for both. nameLen -= 3
k, ok, _ := compressionLenSearch(c, r.Header().Name) i += 3
if ok {
// Size of x is reduced by k, but we add 1 since k includes the '.' and label descriptor take 2 bytes
// so, basically x:= x - k - 1 + 2
x += 1 - k
}
tmpLen += compressionLenHelper(c, r.Header().Name, tmpLen)
k, ok, _ = compressionLenSearchType(c, r)
if ok {
x += 1 - k
}
lenp += x
tmpLen = lenp
tmpLen += compressionLenHelperType(c, r, tmpLen)
}
return lenp - initLen
}
// Put the parts of the name in the compression map, return the size in bytes added in payload
func compressionLenHelper(c map[string]struct{}, s string, currentLen int) int {
if currentLen > maxCompressionOffset {
// We won't be able to add any label that could be re-used later anyway
return 0
}
if _, ok := c[s]; ok {
return 0
}
initLen := currentLen
pref := ""
prev := s
lbs := Split(s)
for j := 0; j < len(lbs); j++ {
pref = s[lbs[j]:]
currentLen += len(prev) - len(pref)
prev = pref
if _, ok := c[pref]; !ok {
// If first byte label is within the first 14bits, it might be re-used later
if currentLen < maxCompressionOffset {
c[pref] = struct{}{}
}
} else { } else {
added := currentLen - initLen nameLen--
if j > 0 { i++
// We added a new PTR
added += 2
}
return added
} }
} }
return currentLen - initLen
return nameLen
} }
// Look for each part in the compression map and returns its length, func compressionLenSearch(c map[string]struct{}, s string, msgOff int) (int, bool) {
// keep on searching so we get the longest match. for off, end := 0, false; !end; off, end = NextLabel(s, off) {
// Will return the size of compression found, whether a match has been
// found and the size of record if added in payload
func compressionLenSearch(c map[string]struct{}, s string) (int, bool, int) {
off := 0
end := false
if s == "" { // don't bork on bogus data
return 0, false, 0
}
fullSize := 0
for {
if _, ok := c[s[off:]]; ok { if _, ok := c[s[off:]]; ok {
return len(s[off:]), true, fullSize + off return off, true
} }
if end {
break if msgOff+off < maxCompressionOffset {
c[s[off:]] = struct{}{}
} }
// Each label descriptor takes 2 bytes, add it
fullSize += 2
off, end = NextLabel(s, off)
} }
return 0, false, fullSize + len(s)
return 0, false
} }
// Copy returns a new RR which is a deep-copy of r. // Copy returns a new RR which is a deep-copy of r.
func Copy(r RR) RR { r1 := r.copy(); return r1 } func Copy(r RR) RR { return r.copy() }
// Len returns the length (in octets) of the uncompressed RR in wire format. // Len returns the length (in octets) of the uncompressed RR in wire format.
func Len(r RR) int { return r.len() } func Len(r RR) int { return r.len(0, nil) }
// Copy returns a new *Msg which is a deep-copy of dns. // Copy returns a new *Msg which is a deep-copy of dns.
func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) } func (dns *Msg) Copy() *Msg { return dns.CopyTo(new(Msg)) }
@ -1089,40 +1091,27 @@ func (dns *Msg) CopyTo(r1 *Msg) *Msg {
} }
rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra))
var rri int r1.Answer, rrArr = rrArr[:0:len(dns.Answer)], rrArr[len(dns.Answer):]
r1.Ns, rrArr = rrArr[:0:len(dns.Ns)], rrArr[len(dns.Ns):]
r1.Extra = rrArr[:0:len(dns.Extra)]
if len(dns.Answer) > 0 { for _, r := range dns.Answer {
rrbegin := rri r1.Answer = append(r1.Answer, r.copy())
for i := 0; i < len(dns.Answer); i++ {
rrArr[rri] = dns.Answer[i].copy()
rri++
}
r1.Answer = rrArr[rrbegin:rri:rri]
} }
if len(dns.Ns) > 0 { for _, r := range dns.Ns {
rrbegin := rri r1.Ns = append(r1.Ns, r.copy())
for i := 0; i < len(dns.Ns); i++ {
rrArr[rri] = dns.Ns[i].copy()
rri++
}
r1.Ns = rrArr[rrbegin:rri:rri]
} }
if len(dns.Extra) > 0 { for _, r := range dns.Extra {
rrbegin := rri r1.Extra = append(r1.Extra, r.copy())
for i := 0; i < len(dns.Extra); i++ {
rrArr[rri] = dns.Extra[i].copy()
rri++
}
r1.Extra = rrArr[rrbegin:rri:rri]
} }
return r1 return r1
} }
func (q *Question) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { func (q *Question) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
off, err := PackDomainName(q.Name, msg, off, compression, compress) off, err := packDomainName(q.Name, msg, off, compression, compress)
if err != nil { if err != nil {
return off, err return off, err
} }
@ -1163,7 +1152,7 @@ func unpackQuestion(msg []byte, off int) (Question, int, error) {
return q, off, err return q, off, err
} }
func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) { func (dh *Header) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
off, err := packUint16(dh.Id, msg, off) off, err := packUint16(dh.Id, msg, off)
if err != nil { if err != nil {
return off, err return off, err
@ -1185,7 +1174,10 @@ func (dh *Header) pack(msg []byte, off int, compression map[string]int, compress
return off, err return off, err
} }
off, err = packUint16(dh.Arcount, msg, off) off, err = packUint16(dh.Arcount, msg, off)
return off, err if err != nil {
return off, err
}
return off, nil
} }
func unpackMsgHdr(msg []byte, off int) (Header, int, error) { func unpackMsgHdr(msg []byte, off int) (Header, int, error) {
@ -1214,7 +1206,10 @@ func unpackMsgHdr(msg []byte, off int) (Header, int, error) {
return dh, off, err return dh, off, err
} }
dh.Arcount, off, err = unpackUint16(msg, off) dh.Arcount, off, err = unpackUint16(msg, off)
return dh, off, err if err != nil {
return dh, off, err
}
return dh, off, nil
} }
// setHdr set the header in the dns using the binary data in dh. // setHdr set the header in the dns using the binary data in dh.
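For reference, a minimal sketch, separate from the vendored diff, of how the compression-aware length logic above is reached through the public API of github.com/miekg/dns; the names and records are illustrative only.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion(dns.Fqdn("example.org"), dns.TypeMX)
	if rr, err := dns.NewRR("example.org. 3600 IN MX 10 mail.example.org."); err == nil {
		m.Answer = append(m.Answer, rr)
	}

	// Msg.Len honours Msg.Compress: msgLenWithCompressionMap is called with
	// a fresh compression map when Compress is set, and with nil otherwise.
	m.Compress = false
	fmt.Println("uncompressed length:", m.Len())
	m.Compress = true
	fmt.Println("compressed length:  ", m.Len())
}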


@ -80,13 +80,7 @@ func main() {
o := scope.Lookup(name) o := scope.Lookup(name)
st, _ := getTypeStruct(o.Type(), scope) st, _ := getTypeStruct(o.Type(), scope)
fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {\n", name) fmt.Fprintf(b, "func (rr *%s) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) {\n", name)
fmt.Fprint(b, `off, err := rr.Hdr.pack(msg, off, compression, compress)
if err != nil {
return off, err
}
headerEnd := off
`)
for i := 1; i < st.NumFields(); i++ { for i := 1; i < st.NumFields(); i++ {
o := func(s string) { o := func(s string) {
fmt.Fprintf(b, s, st.Field(i).Name()) fmt.Fprintf(b, s, st.Field(i).Name())
@ -106,7 +100,7 @@ return off, err
case `dns:"nsec"`: case `dns:"nsec"`:
o("off, err = packDataNsec(rr.%s, msg, off)\n") o("off, err = packDataNsec(rr.%s, msg, off)\n")
case `dns:"domain-name"`: case `dns:"domain-name"`:
o("off, err = packDataDomainNames(rr.%s, msg, off, compression, compress)\n") o("off, err = packDataDomainNames(rr.%s, msg, off, compression, false)\n")
default: default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
} }
@ -116,9 +110,9 @@ return off, err
switch { switch {
case st.Tag(i) == `dns:"-"`: // ignored case st.Tag(i) == `dns:"-"`: // ignored
case st.Tag(i) == `dns:"cdomain-name"`: case st.Tag(i) == `dns:"cdomain-name"`:
o("off, err = PackDomainName(rr.%s, msg, off, compression, compress)\n") o("off, err = packDomainName(rr.%s, msg, off, compression, compress)\n")
case st.Tag(i) == `dns:"domain-name"`: case st.Tag(i) == `dns:"domain-name"`:
o("off, err = PackDomainName(rr.%s, msg, off, compression, false)\n") o("off, err = packDomainName(rr.%s, msg, off, compression, false)\n")
case st.Tag(i) == `dns:"a"`: case st.Tag(i) == `dns:"a"`:
o("off, err = packDataA(rr.%s, msg, off)\n") o("off, err = packDataA(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"aaaa"`: case st.Tag(i) == `dns:"aaaa"`:
@ -154,7 +148,8 @@ if rr.%s != "-" {
fallthrough fallthrough
case st.Tag(i) == `dns:"hex"`: case st.Tag(i) == `dns:"hex"`:
o("off, err = packStringHex(rr.%s, msg, off)\n") o("off, err = packStringHex(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"any"`:
o("off, err = packStringAny(rr.%s, msg, off)\n")
case st.Tag(i) == `dns:"octet"`: case st.Tag(i) == `dns:"octet"`:
o("off, err = packStringOctet(rr.%s, msg, off)\n") o("off, err = packStringOctet(rr.%s, msg, off)\n")
case st.Tag(i) == "": case st.Tag(i) == "":
@ -176,8 +171,6 @@ if rr.%s != "-" {
log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
} }
} }
// We have packed everything, only now we know the rdlength of this RR
fmt.Fprintln(b, "rr.Header().Rdlength = uint16(off-headerEnd)")
fmt.Fprintln(b, "return off, nil }\n") fmt.Fprintln(b, "return off, nil }\n")
} }
@ -186,14 +179,8 @@ if rr.%s != "-" {
o := scope.Lookup(name) o := scope.Lookup(name)
st, _ := getTypeStruct(o.Type(), scope) st, _ := getTypeStruct(o.Type(), scope)
fmt.Fprintf(b, "func unpack%s(h RR_Header, msg []byte, off int) (RR, int, error) {\n", name) fmt.Fprintf(b, "func (rr *%s) unpack(msg []byte, off int) (off1 int, err error) {\n", name)
fmt.Fprintf(b, "rr := new(%s)\n", name) fmt.Fprint(b, `rdStart := off
fmt.Fprint(b, "rr.Hdr = h\n")
fmt.Fprint(b, `if noRdata(h) {
return rr, off, nil
}
var err error
rdStart := off
_ = rdStart _ = rdStart
`) `)
@ -201,7 +188,7 @@ _ = rdStart
o := func(s string) { o := func(s string) {
fmt.Fprintf(b, s, st.Field(i).Name()) fmt.Fprintf(b, s, st.Field(i).Name())
fmt.Fprint(b, `if err != nil { fmt.Fprint(b, `if err != nil {
return rr, off, err return off, err
} }
`) `)
} }
@ -221,7 +208,7 @@ return rr, off, err
log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
} }
fmt.Fprint(b, `if err != nil { fmt.Fprint(b, `if err != nil {
return rr, off, err return off, err
} }
`) `)
continue continue
@ -264,6 +251,8 @@ return rr, off, err
o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") o("rr.%s, off, err = unpackStringBase64(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
case `dns:"hex"`: case `dns:"hex"`:
o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n") o("rr.%s, off, err = unpackStringHex(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
case `dns:"any"`:
o("rr.%s, off, err = unpackStringAny(msg, off, rdStart + int(rr.Hdr.Rdlength))\n")
case `dns:"octet"`: case `dns:"octet"`:
o("rr.%s, off, err = unpackStringOctet(msg, off)\n") o("rr.%s, off, err = unpackStringOctet(msg, off)\n")
case "": case "":
@ -287,22 +276,13 @@ return rr, off, err
// If we've hit len(msg) we return without error. // If we've hit len(msg) we return without error.
if i < st.NumFields()-1 { if i < st.NumFields()-1 {
fmt.Fprintf(b, `if off == len(msg) { fmt.Fprintf(b, `if off == len(msg) {
return rr, off, nil return off, nil
} }
`) `)
} }
} }
fmt.Fprintf(b, "return rr, off, err }\n\n") fmt.Fprintf(b, "return off, nil }\n\n")
} }
// Generate typeToUnpack map
fmt.Fprintln(b, "var typeToUnpack = map[uint16]func(RR_Header, []byte, int) (RR, int, error){")
for _, name := range namedTypes {
if name == "RFC3597" {
continue
}
fmt.Fprintf(b, "Type%s: unpack%s,\n", name, name)
}
fmt.Fprintln(b, "}\n")
// gofmt // gofmt
res, err := format.Source(b.Bytes()) res, err := format.Source(b.Bytes())


@ -25,12 +25,13 @@ func unpackDataA(msg []byte, off int) (net.IP, int, error) {
} }
func packDataA(a net.IP, msg []byte, off int) (int, error) { func packDataA(a net.IP, msg []byte, off int) (int, error) {
// It must be a slice of 4, even if it is 16, we encode only the first 4
if off+net.IPv4len > len(msg) {
return len(msg), &Error{err: "overflow packing a"}
}
switch len(a) { switch len(a) {
case net.IPv4len, net.IPv6len: case net.IPv4len, net.IPv6len:
// It must be a slice of 4, even if it is 16, we encode only the first 4
if off+net.IPv4len > len(msg) {
return len(msg), &Error{err: "overflow packing a"}
}
copy(msg[off:], a.To4()) copy(msg[off:], a.To4())
off += net.IPv4len off += net.IPv4len
case 0: case 0:
@ -51,12 +52,12 @@ func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) {
} }
func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) {
if off+net.IPv6len > len(msg) {
return len(msg), &Error{err: "overflow packing aaaa"}
}
switch len(aaaa) { switch len(aaaa) {
case net.IPv6len: case net.IPv6len:
if off+net.IPv6len > len(msg) {
return len(msg), &Error{err: "overflow packing aaaa"}
}
copy(msg[off:], aaaa) copy(msg[off:], aaaa)
off += net.IPv6len off += net.IPv6len
case 0: case 0:
@ -99,14 +100,14 @@ func unpackHeader(msg []byte, off int) (rr RR_Header, off1 int, truncmsg []byte,
return hdr, off, msg, err return hdr, off, msg, err
} }
// pack packs an RR header, returning the offset to the end of the header. // packHeader packs an RR header, returning the offset to the end of the header.
// See PackDomainName for documentation about the compression. // See PackDomainName for documentation about the compression.
func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compress bool) (off1 int, err error) { func (hdr RR_Header) packHeader(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
if off == len(msg) { if off == len(msg) {
return off, nil return off, nil
} }
off, err = PackDomainName(hdr.Name, msg, off, compression, compress) off, err := packDomainName(hdr.Name, msg, off, compression, compress)
if err != nil { if err != nil {
return len(msg), err return len(msg), err
} }
@ -122,7 +123,7 @@ func (hdr RR_Header) pack(msg []byte, off int, compression map[string]int, compr
if err != nil { if err != nil {
return len(msg), err return len(msg), err
} }
off, err = packUint16(hdr.Rdlength, msg, off) off, err = packUint16(0, msg, off) // The RDLENGTH field will be set later in packRR.
if err != nil { if err != nil {
return len(msg), err return len(msg), err
} }
@ -177,14 +178,14 @@ func unpackUint8(msg []byte, off int) (i uint8, off1 int, err error) {
if off+1 > len(msg) { if off+1 > len(msg) {
return 0, len(msg), &Error{err: "overflow unpacking uint8"} return 0, len(msg), &Error{err: "overflow unpacking uint8"}
} }
return uint8(msg[off]), off + 1, nil return msg[off], off + 1, nil
} }
func packUint8(i uint8, msg []byte, off int) (off1 int, err error) { func packUint8(i uint8, msg []byte, off int) (off1 int, err error) {
if off+1 > len(msg) { if off+1 > len(msg) {
return len(msg), &Error{err: "overflow packing uint8"} return len(msg), &Error{err: "overflow packing uint8"}
} }
msg[off] = byte(i) msg[off] = i
return off + 1, nil return off + 1, nil
} }
@ -275,7 +276,7 @@ func unpackString(msg []byte, off int) (string, int, error) {
s.WriteByte('\\') s.WriteByte('\\')
s.WriteByte(b) s.WriteByte(b)
case b < ' ' || b > '~': // unprintable case b < ' ' || b > '~': // unprintable
writeEscapedByte(&s, b) s.WriteString(escapeByte(b))
default: default:
s.WriteByte(b) s.WriteByte(b)
} }
@ -363,6 +364,22 @@ func packStringHex(s string, msg []byte, off int) (int, error) {
return off, nil return off, nil
} }
func unpackStringAny(msg []byte, off, end int) (string, int, error) {
if end > len(msg) {
return "", len(msg), &Error{err: "overflow unpacking anything"}
}
return string(msg[off:end]), end, nil
}
func packStringAny(s string, msg []byte, off int) (int, error) {
if off+len(s) > len(msg) {
return len(msg), &Error{err: "overflow packing anything"}
}
copy(msg[off:off+len(s)], s)
off += len(s)
return off, nil
}
func unpackStringTxt(msg []byte, off int) ([]string, int, error) { func unpackStringTxt(msg []byte, off int) ([]string, int, error) {
txt, off, err := unpackTxt(msg, off) txt, off, err := unpackTxt(msg, off)
if err != nil { if err != nil {
@ -383,7 +400,7 @@ func packStringTxt(s []string, msg []byte, off int) (int, error) {
func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) {
var edns []EDNS0 var edns []EDNS0
Option: Option:
code := uint16(0) var code uint16
if off+4 > len(msg) { if off+4 > len(msg) {
return nil, len(msg), &Error{err: "overflow unpacking opt"} return nil, len(msg), &Error{err: "overflow unpacking opt"}
} }
@ -537,8 +554,7 @@ func unpackDataNsec(msg []byte, off int) ([]uint16, int, error) {
} }
// Walk the bytes in the window and extract the type bits // Walk the bytes in the window and extract the type bits
for j := 0; j < length; j++ { for j, b := range msg[off : off+length] {
b := msg[off+j]
// Check the bits one by one, and set the type // Check the bits one by one, and set the type
if b&0x80 == 0x80 { if b&0x80 == 0x80 {
nsec = append(nsec, uint16(window*256+j*8+0)) nsec = append(nsec, uint16(window*256+j*8+0))
@ -576,8 +592,7 @@ func packDataNsec(bitmap []uint16, msg []byte, off int) (int, error) {
return off, nil return off, nil
} }
var lastwindow, lastlength uint16 var lastwindow, lastlength uint16
for j := 0; j < len(bitmap); j++ { for _, t := range bitmap {
t := bitmap[j]
window := t / 256 window := t / 256
length := (t-window*256)/8 + 1 length := (t-window*256)/8 + 1
if window > lastwindow && lastlength != 0 { // New window, jump to the new offset if window > lastwindow && lastlength != 0 { // New window, jump to the new offset
@ -621,10 +636,10 @@ func unpackDataDomainNames(msg []byte, off, end int) ([]string, int, error) {
return servers, off, nil return servers, off, nil
} }
func packDataDomainNames(names []string, msg []byte, off int, compression map[string]int, compress bool) (int, error) { func packDataDomainNames(names []string, msg []byte, off int, compression compressionMap, compress bool) (int, error) {
var err error var err error
for j := 0; j < len(names); j++ { for _, name := range names {
off, err = PackDomainName(names[j], msg, off, compression, false && compress) off, err = packDomainName(name, msg, off, compression, compress)
if err != nil { if err != nil {
return len(msg), err return len(msg), err
} }

vendor/github.com/miekg/dns/msg_truncate.go generated vendored (new file, 106 lines)

@ -0,0 +1,106 @@
package dns
// Truncate ensures the reply message will fit into the requested buffer
// size by removing records that exceed the requested size.
//
// It will first check if the reply fits without compression and then with
// compression. If it won't fit with compression, Truncate then walks the
// records, adding as many as possible without exceeding the
// requested buffer size.
//
// The TC bit will be set if any answer records were excluded from the
// message. This indicates that the client should retry over TCP.
//
// The appropriate buffer size can be retrieved from the request's OPT
// record, if present, and is transport specific otherwise. dns.MinMsgSize
// should be used for UDP requests without an OPT record, and
// dns.MaxMsgSize for TCP requests without an OPT record.
func (dns *Msg) Truncate(size int) {
if dns.IsTsig() != nil {
// To simplify this implementation, we don't perform
// truncation on responses with a TSIG record.
return
}
// RFC 6891 mandates that the payload size in an OPT record
// less than 512 bytes must be treated as equal to 512 bytes.
//
// For ease of use, we impose that restriction here.
if size < 512 {
size = 512
}
l := msgLenWithCompressionMap(dns, nil) // uncompressed length
if l <= size {
// Don't waste effort compressing this message.
dns.Compress = false
return
}
dns.Compress = true
edns0 := dns.popEdns0()
if edns0 != nil {
// Account for the OPT record that gets added at the end,
// by subtracting that length from our budget.
//
// The EDNS(0) OPT record must have the root domain and
// its length is thus unaffected by compression.
size -= Len(edns0)
}
compression := make(map[string]struct{})
l = headerSize
for _, r := range dns.Question {
l += r.len(l, compression)
}
var numAnswer int
if l < size {
l, numAnswer = truncateLoop(dns.Answer, size, l, compression)
}
var numNS int
if l < size {
l, numNS = truncateLoop(dns.Ns, size, l, compression)
}
var numExtra int
if l < size {
l, numExtra = truncateLoop(dns.Extra, size, l, compression)
}
// According to RFC 2181, the TC bit should only be set if not all
// of the answer RRs can be included in the response.
dns.Truncated = len(dns.Answer) > numAnswer
dns.Answer = dns.Answer[:numAnswer]
dns.Ns = dns.Ns[:numNS]
dns.Extra = dns.Extra[:numExtra]
if edns0 != nil {
// Add the OPT record back onto the additional section.
dns.Extra = append(dns.Extra, edns0)
}
}
func truncateLoop(rrs []RR, size, l int, compression map[string]struct{}) (int, int) {
for i, r := range rrs {
if r == nil {
continue
}
l += r.len(l, compression)
if l > size {
// Return size, rather than l prior to this record,
// to prevent any further records being added.
return size, i
}
if l == size {
return l, i + 1
}
}
return l, len(rrs)
}
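A short usage sketch, separate from the vendored diff, for the new Truncate method; the handler below is hypothetical and the listen address is a placeholder.

package main

import "github.com/miekg/dns"

// truncatingHandler sizes the reply to the client's advertised EDNS(0) buffer
// (falling back to dns.MinMsgSize for plain UDP) before writing it.
func truncatingHandler(w dns.ResponseWriter, req *dns.Msg) {
	m := new(dns.Msg)
	m.SetReply(req)
	// ... append answer records to m.Answer here ...

	size := dns.MinMsgSize // 512 bytes, also the floor Truncate enforces
	if opt := req.IsEdns0(); opt != nil {
		size = int(opt.UDPSize())
	}
	m.Truncate(size) // may set TC and enable compression
	w.WriteMsg(m)
}

func main() {
	dns.HandleFunc(".", truncatingHandler)
	// dns.ListenAndServe("127.0.0.1:8053", "udp", nil) would serve it; omitted here.
}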

vendor/github.com/miekg/dns/nsecx.go generated vendored (41 lines changed)

@ -2,49 +2,44 @@ package dns
import ( import (
"crypto/sha1" "crypto/sha1"
"hash" "encoding/hex"
"strings" "strings"
) )
type saltWireFmt struct {
Salt string `dns:"size-hex"`
}
// HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase. // HashName hashes a string (label) according to RFC 5155. It returns the hashed string in uppercase.
func HashName(label string, ha uint8, iter uint16, salt string) string { func HashName(label string, ha uint8, iter uint16, salt string) string {
saltwire := new(saltWireFmt) if ha != SHA1 {
saltwire.Salt = salt return ""
wire := make([]byte, DefaultMsgSize) }
n, err := packSaltWire(saltwire, wire)
wireSalt := make([]byte, hex.DecodedLen(len(salt)))
n, err := packStringHex(salt, wireSalt, 0)
if err != nil { if err != nil {
return "" return ""
} }
wire = wire[:n] wireSalt = wireSalt[:n]
name := make([]byte, 255) name := make([]byte, 255)
off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false) off, err := PackDomainName(strings.ToLower(label), name, 0, nil, false)
if err != nil { if err != nil {
return "" return ""
} }
name = name[:off] name = name[:off]
var s hash.Hash
switch ha {
case SHA1:
s = sha1.New()
default:
return ""
}
s := sha1.New()
// k = 0 // k = 0
s.Write(name) s.Write(name)
s.Write(wire) s.Write(wireSalt)
nsec3 := s.Sum(nil) nsec3 := s.Sum(nil)
// k > 0 // k > 0
for k := uint16(0); k < iter; k++ { for k := uint16(0); k < iter; k++ {
s.Reset() s.Reset()
s.Write(nsec3) s.Write(nsec3)
s.Write(wire) s.Write(wireSalt)
nsec3 = s.Sum(nsec3[:0]) nsec3 = s.Sum(nsec3[:0])
} }
return toBase32(nsec3) return toBase32(nsec3)
} }
@ -98,11 +93,3 @@ func (rr *NSEC3) Match(name string) bool {
} }
return false return false
} }
func packSaltWire(sw *saltWireFmt, msg []byte) (int, error) {
off, err := packStringHex(sw.Salt, msg, 0)
if err != nil {
return off, err
}
return off, nil
}
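A tiny sketch, separate from the vendored diff, of the exported HashName helper rewritten above; the salt and iteration count are illustrative values in NSEC3 presentation form.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// SHA-1 is the only hash defined for NSEC3 (RFC 5155); any other
	// algorithm now makes HashName return the empty string.
	hashed := dns.HashName("example.org.", dns.SHA1, 12, "aabbccdd")
	fmt.Println(hashed) // upper-case base32, as used in NSEC3 owner names
}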


@ -39,11 +39,12 @@ func mkPrivateRR(rrtype uint16) *PrivateRR {
} }
anyrr := rrfunc() anyrr := rrfunc()
switch rr := anyrr.(type) { rr, ok := anyrr.(*PrivateRR)
case *PrivateRR: if !ok {
return rr panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr))
} }
panic(fmt.Sprintf("dns: RR is not a PrivateRR, TypeToRR[%d] generator returned %T", rrtype, anyrr))
return rr
} }
// Header return the RR header of r. // Header return the RR header of r.
@ -52,7 +53,12 @@ func (r *PrivateRR) Header() *RR_Header { return &r.Hdr }
func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() } func (r *PrivateRR) String() string { return r.Hdr.String() + r.Data.String() }
// Private len and copy parts to satisfy RR interface. // Private len and copy parts to satisfy RR interface.
func (r *PrivateRR) len() int { return r.Hdr.len() + r.Data.Len() } func (r *PrivateRR) len(off int, compression map[string]struct{}) int {
l := r.Hdr.len(off, compression)
l += r.Data.Len()
return l
}
func (r *PrivateRR) copy() RR { func (r *PrivateRR) copy() RR {
// make new RR like this: // make new RR like this:
rr := mkPrivateRR(r.Hdr.Rrtype) rr := mkPrivateRR(r.Hdr.Rrtype)
@ -64,21 +70,47 @@ func (r *PrivateRR) copy() RR {
} }
return rr return rr
} }
func (r *PrivateRR) pack(msg []byte, off int, compression map[string]int, compress bool) (int, error) {
off, err := r.Hdr.pack(msg, off, compression, compress) func (r *PrivateRR) pack(msg []byte, off int, compression compressionMap, compress bool) (int, error) {
if err != nil {
return off, err
}
headerEnd := off
n, err := r.Data.Pack(msg[off:]) n, err := r.Data.Pack(msg[off:])
if err != nil { if err != nil {
return len(msg), err return len(msg), err
} }
off += n off += n
r.Header().Rdlength = uint16(off - headerEnd)
return off, nil return off, nil
} }
func (r *PrivateRR) unpack(msg []byte, off int) (int, error) {
off1, err := r.Data.Unpack(msg[off:])
off += off1
return off, err
}
func (r *PrivateRR) parse(c *zlexer, origin, file string) *ParseError {
var l lex
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
Fetch:
for {
// TODO(miek): we could also be returning _QUOTE, this might or might not
// be an issue (basically parsing TXT becomes hard)
switch l, _ = c.Next(); l.value {
case zNewline, zEOF:
break Fetch
case zString:
text = append(text, l.token)
}
}
err := r.Data.Parse(text)
if err != nil {
return &ParseError{file, err.Error(), l}
}
return nil
}
func (r1 *PrivateRR) isDuplicate(r2 RR) bool { return false }
// PrivateHandle registers a private resource record type. It requires // PrivateHandle registers a private resource record type. It requires
// string and numeric representation of private RR type and generator function as argument. // string and numeric representation of private RR type and generator function as argument.
func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) { func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata) {
@ -87,51 +119,6 @@ func PrivateHandle(rtypestr string, rtype uint16, generator func() PrivateRdata)
TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} } TypeToRR[rtype] = func() RR { return &PrivateRR{RR_Header{}, generator()} }
TypeToString[rtype] = rtypestr TypeToString[rtype] = rtypestr
StringToType[rtypestr] = rtype StringToType[rtypestr] = rtype
typeToUnpack[rtype] = func(h RR_Header, msg []byte, off int) (RR, int, error) {
if noRdata(h) {
return &h, off, nil
}
var err error
rr := mkPrivateRR(h.Rrtype)
rr.Hdr = h
off1, err := rr.Data.Unpack(msg[off:])
off += off1
if err != nil {
return rr, off, err
}
return rr, off, err
}
setPrivateRR := func(h RR_Header, c *zlexer, o, f string) (RR, *ParseError, string) {
rr := mkPrivateRR(h.Rrtype)
rr.Hdr = h
var l lex
text := make([]string, 0, 2) // could be 0..N elements, median is probably 1
Fetch:
for {
// TODO(miek): we could also be returning _QUOTE, this might or might not
// be an issue (basically parsing TXT becomes hard)
switch l, _ = c.Next(); l.value {
case zNewline, zEOF:
break Fetch
case zString:
text = append(text, l.token)
}
}
err := rr.Data.Parse(text)
if err != nil {
return nil, &ParseError{f, err.Error(), l}, ""
}
return rr, nil, ""
}
typeToparserFunc[rtype] = parserFunc{setPrivateRR, true}
} }
// PrivateHandleRemove removes definitions required to support private RR type. // PrivateHandleRemove removes definitions required to support private RR type.
@ -140,8 +127,6 @@ func PrivateHandleRemove(rtype uint16) {
if ok { if ok {
delete(TypeToRR, rtype) delete(TypeToRR, rtype)
delete(TypeToString, rtype) delete(TypeToString, rtype)
delete(typeToparserFunc, rtype)
delete(StringToType, rtypestr) delete(StringToType, rtypestr)
delete(typeToUnpack, rtype)
} }
} }
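A hedged sketch, separate from the vendored diff, of the PrivateHandle/PrivateRdata plumbing that the rewritten pack, unpack and parse hooks above serve; the ISBN type, the type code 65280 and the record text are made up for illustration.

package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

// ISBN is a hypothetical private rdata type used only to exercise the API.
type ISBN struct{ Data string }

func (p *ISBN) String() string                 { return p.Data }
func (p *ISBN) Parse(txt []string) error       { p.Data = strings.Join(txt, " "); return nil }
func (p *ISBN) Pack(buf []byte) (int, error)   { return copy(buf, p.Data), nil }
func (p *ISBN) Unpack(buf []byte) (int, error) { p.Data = string(buf); return len(buf), nil }
func (p *ISBN) Len() int                       { return len(p.Data) }

func (p *ISBN) Copy(dest dns.PrivateRdata) error {
	d, ok := dest.(*ISBN)
	if !ok {
		return dns.ErrRdata
	}
	d.Data = p.Data
	return nil
}

func main() {
	// 65280 (0xFF00) lies in the private-use RR type range.
	dns.PrivateHandle("ISBN", 65280, func() dns.PrivateRdata { return new(ISBN) })
	defer dns.PrivateHandleRemove(65280)

	rr, err := dns.NewRR("example.org. 3600 IN ISBN 12-3-456-78901-2")
	if err != nil {
		panic(err)
	}
	fmt.Println(rr)
}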


@ -1,49 +0,0 @@
package dns
import "encoding/binary"
// rawSetRdlength sets the rdlength in the header of
// the RR. The offset 'off' must be positioned at the
// start of the header of the RR, 'end' must be the
// end of the RR.
func rawSetRdlength(msg []byte, off, end int) bool {
l := len(msg)
Loop:
for {
if off+1 > l {
return false
}
c := int(msg[off])
off++
switch c & 0xC0 {
case 0x00:
if c == 0x00 {
// End of the domainname
break Loop
}
if off+c > l {
return false
}
off += c
case 0xC0:
// pointer, next byte included, ends domainname
off++
break Loop
}
}
// The domainname has been seen, we are at the start of the fixed part in the header.
// Type is 2 bytes, class is 2 bytes, ttl 4 and then 2 bytes for the length.
off += 2 + 2 + 4
if off+2 > l {
return false
}
//off+1 is the end of the header, 'end' is the end of the rr
//so 'end' - 'off+2' is the length of the rdata
rdatalen := end - (off + 2)
if rdatalen > 0xFFFF {
return false
}
binary.BigEndian.PutUint16(msg[off:], uint16(rdatalen))
return true
}


@ -17,6 +17,15 @@ func init() {
StringToRcode["NOTIMPL"] = RcodeNotImplemented StringToRcode["NOTIMPL"] = RcodeNotImplemented
} }
// StringToAlgorithm is the reverse of AlgorithmToString.
var StringToAlgorithm = reverseInt8(AlgorithmToString)
// StringToHash is a map of names to hash IDs.
var StringToHash = reverseInt8(HashToString)
// StringToCertType is the reverse of CertTypeToString.
var StringToCertType = reverseInt16(CertTypeToString)
// Reverse a map // Reverse a map
func reverseInt8(m map[uint8]string) map[string]uint8 { func reverseInt8(m map[uint8]string) map[string]uint8 {
n := make(map[string]uint8, len(m)) n := make(map[string]uint8, len(m))


@ -15,10 +15,11 @@ func Dedup(rrs []RR, m map[string]RR) []RR {
for _, r := range rrs { for _, r := range rrs {
key := normalizedString(r) key := normalizedString(r)
keys = append(keys, &key) keys = append(keys, &key)
if _, ok := m[key]; ok { if mr, ok := m[key]; ok {
// Shortest TTL wins. // Shortest TTL wins.
if m[key].Header().Ttl > r.Header().Ttl { rh, mrh := r.Header(), mr.Header()
m[key].Header().Ttl = r.Header().Ttl if mrh.Ttl > rh.Ttl {
mrh.Ttl = rh.Ttl
} }
continue continue
} }
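A small sketch, separate from the vendored diff, of the exported Dedup helper touched above; the record values are illustrative.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	a1, _ := dns.NewRR("example.org. 300 IN A 192.0.2.1")
	a2, _ := dns.NewRR("example.org. 60 IN A 192.0.2.1")

	// Duplicates collapse to a single record and the shortest TTL wins,
	// which is the branch rewritten in the hunk above.
	for _, rr := range dns.Dedup([]dns.RR{a1, a2}, nil) {
		fmt.Println(rr) // printed with TTL 60
	}
}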

vendor/github.com/miekg/dns/scan.go generated vendored (88 lines changed)

@ -79,13 +79,12 @@ func (e *ParseError) Error() (s string) {
} }
type lex struct { type lex struct {
token string // text of the token token string // text of the token
err bool // when true, token text has lexer error err bool // when true, token text has lexer error
value uint8 // value: zString, _BLANK, etc. value uint8 // value: zString, _BLANK, etc.
torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar torc uint16 // type or class as parsed in the lexer, we only need to look this up in the grammar
line int // line in the file line int // line in the file
column int // column in the file column int // column in the file
comment string // any comment text seen
} }
// Token holds the token that are returned when a zone file is parsed. // Token holds the token that are returned when a zone file is parsed.
@ -244,8 +243,6 @@ type ZoneParser struct {
sub *ZoneParser sub *ZoneParser
osFile *os.File osFile *os.File
com string
includeDepth uint8 includeDepth uint8
includeAllowed bool includeAllowed bool
@ -318,12 +315,19 @@ func (zp *ZoneParser) setParseError(err string, l lex) (RR, bool) {
// Comment returns an optional text comment that occurred alongside // Comment returns an optional text comment that occurred alongside
// the RR. // the RR.
func (zp *ZoneParser) Comment() string { func (zp *ZoneParser) Comment() string {
return zp.com if zp.parseErr != nil {
return ""
}
if zp.sub != nil {
return zp.sub.Comment()
}
return zp.c.Comment()
} }
func (zp *ZoneParser) subNext() (RR, bool) { func (zp *ZoneParser) subNext() (RR, bool) {
if rr, ok := zp.sub.Next(); ok { if rr, ok := zp.sub.Next(); ok {
zp.com = zp.sub.com
return rr, true return rr, true
} }
@ -347,8 +351,6 @@ func (zp *ZoneParser) subNext() (RR, bool) {
// error. After Next returns (nil, false), the Err method will return // error. After Next returns (nil, false), the Err method will return
// any error that occurred during parsing. // any error that occurred during parsing.
func (zp *ZoneParser) Next() (RR, bool) { func (zp *ZoneParser) Next() (RR, bool) {
zp.com = ""
if zp.parseErr != nil { if zp.parseErr != nil {
return nil, false return nil, false
} }
@ -501,7 +503,7 @@ func (zp *ZoneParser) Next() (RR, bool) {
return zp.setParseError("expecting $TTL value, not this...", l) return zp.setParseError("expecting $TTL value, not this...", l)
} }
if e, _ := slurpRemainder(zp.c, zp.file); e != nil { if e := slurpRemainder(zp.c, zp.file); e != nil {
zp.parseErr = e zp.parseErr = e
return nil, false return nil, false
} }
@ -525,7 +527,7 @@ func (zp *ZoneParser) Next() (RR, bool) {
return zp.setParseError("expecting $ORIGIN value, not this...", l) return zp.setParseError("expecting $ORIGIN value, not this...", l)
} }
if e, _ := slurpRemainder(zp.c, zp.file); e != nil { if e := slurpRemainder(zp.c, zp.file); e != nil {
zp.parseErr = e zp.parseErr = e
return nil, false return nil, false
} }
@ -648,7 +650,7 @@ func (zp *ZoneParser) Next() (RR, bool) {
st = zExpectRdata st = zExpectRdata
case zExpectRdata: case zExpectRdata:
r, e, c1 := setRR(*h, zp.c, zp.origin, zp.file) r, e := setRR(*h, zp.c, zp.origin, zp.file)
if e != nil { if e != nil {
// If e.lex is nil then we have encountered an unknown RR type // in that case we substitute our current lex token
// in that case we substitute our current lex token // in that case we substitute our current lex token
@ -660,7 +662,6 @@ func (zp *ZoneParser) Next() (RR, bool) {
return nil, false return nil, false
} }
zp.com = c1
return r, true return r, true
} }
} }
@ -678,7 +679,8 @@ type zlexer struct {
line int line int
column int column int
com string comBuf string
comment string
l lex l lex
@ -767,14 +769,15 @@ func (zl *zlexer) Next() (lex, bool) {
escape bool escape bool
) )
if zl.com != "" { if zl.comBuf != "" {
comi = copy(com[:], zl.com) comi = copy(com[:], zl.comBuf)
zl.com = "" zl.comBuf = ""
} }
zl.comment = ""
for x, ok := zl.readByte(); ok; x, ok = zl.readByte() { for x, ok := zl.readByte(); ok; x, ok = zl.readByte() {
l.line, l.column = zl.line, zl.column l.line, l.column = zl.line, zl.column
l.comment = ""
if stri >= len(str) { if stri >= len(str) {
l.token = "token length insufficient for parsing" l.token = "token length insufficient for parsing"
@ -898,7 +901,7 @@ func (zl *zlexer) Next() (lex, bool) {
} }
zl.commt = true zl.commt = true
zl.com = "" zl.comBuf = ""
if comi > 1 { if comi > 1 {
// A newline was previously seen inside a comment that // A newline was previously seen inside a comment that
@ -911,7 +914,7 @@ func (zl *zlexer) Next() (lex, bool) {
comi++ comi++
if stri > 0 { if stri > 0 {
zl.com = string(com[:comi]) zl.comBuf = string(com[:comi])
l.value = zString l.value = zString
l.token = string(str[:stri]) l.token = string(str[:stri])
@ -947,11 +950,11 @@ func (zl *zlexer) Next() (lex, bool) {
l.value = zNewline l.value = zNewline
l.token = "\n" l.token = "\n"
l.comment = string(com[:comi]) zl.comment = string(com[:comi])
return *l, true return *l, true
} }
zl.com = string(com[:comi]) zl.comBuf = string(com[:comi])
break break
} }
@ -977,9 +980,9 @@ func (zl *zlexer) Next() (lex, bool) {
l.value = zNewline l.value = zNewline
l.token = "\n" l.token = "\n"
l.comment = zl.com
zl.com = "" zl.comment = zl.comBuf
zl.comBuf = ""
zl.rrtype = false zl.rrtype = false
zl.owner = true zl.owner = true
@ -1115,7 +1118,7 @@ func (zl *zlexer) Next() (lex, bool) {
// Send remainder of com // Send remainder of com
l.value = zNewline l.value = zNewline
l.token = "\n" l.token = "\n"
l.comment = string(com[:comi]) zl.comment = string(com[:comi])
if retL != (lex{}) { if retL != (lex{}) {
zl.nextL = true zl.nextL = true
@ -1126,7 +1129,6 @@ func (zl *zlexer) Next() (lex, bool) {
} }
if zl.brace != 0 { if zl.brace != 0 {
l.comment = "" // in case there was left over string and comment
l.token = "unbalanced brace" l.token = "unbalanced brace"
l.err = true l.err = true
return *l, true return *l, true
@ -1135,6 +1137,14 @@ func (zl *zlexer) Next() (lex, bool) {
return lex{value: zEOF}, false return lex{value: zEOF}, false
} }
func (zl *zlexer) Comment() string {
if zl.l.err {
return ""
}
return zl.comment
}
// Extract the class number from CLASSxx // Extract the class number from CLASSxx
func classToInt(token string) (uint16, bool) { func classToInt(token string) (uint16, bool) {
offset := 5 offset := 5
@ -1163,8 +1173,7 @@ func typeToInt(token string) (uint16, bool) {
// stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds. // stringToTTL parses things like 2w, 2m, etc, and returns the time in seconds.
func stringToTTL(token string) (uint32, bool) { func stringToTTL(token string) (uint32, bool) {
s := uint32(0) var s, i uint32
i := uint32(0)
for _, c := range token { for _, c := range token {
switch c { switch c {
case 's', 'S': case 's', 'S':
@ -1252,7 +1261,7 @@ func toAbsoluteName(name, origin string) (absolute string, ok bool) {
} }
// check if name is already absolute // check if name is already absolute
if name[len(name)-1] == '.' { if IsFqdn(name) {
return name, true return name, true
} }
@ -1292,24 +1301,21 @@ func locCheckEast(token string, longitude uint32) (uint32, bool) {
return longitude, false return longitude, false
} }
// "Eat" the rest of the "line". Return potential comments // "Eat" the rest of the "line"
func slurpRemainder(c *zlexer, f string) (*ParseError, string) { func slurpRemainder(c *zlexer, f string) *ParseError {
l, _ := c.Next() l, _ := c.Next()
com := ""
switch l.value { switch l.value {
case zBlank: case zBlank:
l, _ = c.Next() l, _ = c.Next()
com = l.comment
if l.value != zNewline && l.value != zEOF { if l.value != zNewline && l.value != zEOF {
return &ParseError{f, "garbage after rdata", l}, "" return &ParseError{f, "garbage after rdata", l}
} }
case zNewline: case zNewline:
com = l.comment
case zEOF: case zEOF:
default: default:
return &ParseError{f, "garbage after rdata", l}, "" return &ParseError{f, "garbage after rdata", l}
} }
return nil, com return nil
} }
// Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64" // Parse a 64 bit-like ipv6 address: "0014:4fff:ff20:ee64"
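A usage sketch, separate from the vendored diff, for the ZoneParser paths changed above (Comment, slurpRemainder, setRR); the zone text is illustrative.

package main

import (
	"fmt"
	"strings"

	"github.com/miekg/dns"
)

func main() {
	const zone = `$ORIGIN example.org.
$TTL 3600
@   IN SOA ns1 admin 2019062401 7200 3600 1209600 3600
www IN A   192.0.2.1 ; web server
`
	zp := dns.NewZoneParser(strings.NewReader(zone), "", "example.org.zone")
	for rr, ok := zp.Next(); ok; rr, ok = zp.Next() {
		// Comment now comes straight from the lexer instead of the
		// removed zp.com field.
		fmt.Println(rr, zp.Comment())
	}
	if err := zp.Err(); err != nil {
		fmt.Println("parse error:", err)
	}
}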

vendor/github.com/miekg/dns/scan_rr.go generated vendored (1142 lines changed)

File diff suppressed because it is too large.

vendor/github.com/miekg/dns/server.go generated vendored (249 lines changed)

@ -3,7 +3,6 @@
package dns package dns
import ( import (
"bytes"
"context" "context"
"crypto/tls" "crypto/tls"
"encoding/binary" "encoding/binary"
@ -12,26 +11,12 @@ import (
"net" "net"
"strings" "strings"
"sync" "sync"
"sync/atomic"
"time" "time"
) )
// Default maximum number of TCP queries before we close the socket. // Default maximum number of TCP queries before we close the socket.
const maxTCPQueries = 128 const maxTCPQueries = 128
// The maximum number of idle workers.
//
// This controls the maximum number of workers that are allowed to stay
// idle waiting for incoming requests before being torn down.
//
// If this limit is reached, the server will just keep spawning new
// workers (goroutines) for each incoming request. In this case, each
// worker will only be used for a single request.
const maxIdleWorkersCount = 10000
// The maximum length of time a worker may idle for before being destroyed.
const idleWorkerTimeout = 10 * time.Second
// aLongTimeAgo is a non-zero time, far in the past, used for // aLongTimeAgo is a non-zero time, far in the past, used for
// immediate cancelation of network operations. // immediate cancelation of network operations.
var aLongTimeAgo = time.Unix(1, 0) var aLongTimeAgo = time.Unix(1, 0)
@ -81,7 +66,6 @@ type ConnectionStater interface {
} }
type response struct { type response struct {
msg []byte
closed bool // connection has been closed closed bool // connection has been closed
hijacked bool // connection has been hijacked by handler hijacked bool // connection has been hijacked by handler
tsigTimersOnly bool tsigTimersOnly bool
@ -92,7 +76,6 @@ type response struct {
tcp net.Conn // i/o connection if TCP was used tcp net.Conn // i/o connection if TCP was used
udpSession *SessionUDP // oob data to get egress interface right udpSession *SessionUDP // oob data to get egress interface right
writer Writer // writer to output the raw DNS bits writer Writer // writer to output the raw DNS bits
wg *sync.WaitGroup // for gracefull shutdown
} }
// HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets. // HandleFailed returns a HandlerFunc that returns SERVFAIL for every request it gets.
@ -162,11 +145,11 @@ type defaultReader struct {
*Server *Server
} }
func (dr *defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { func (dr defaultReader) ReadTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
return dr.readTCP(conn, timeout) return dr.readTCP(conn, timeout)
} }
func (dr *defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) { func (dr defaultReader) ReadUDP(conn *net.UDPConn, timeout time.Duration) ([]byte, *SessionUDP, error) {
return dr.readUDP(conn, timeout) return dr.readUDP(conn, timeout)
} }
@ -218,11 +201,6 @@ type Server struct {
// By default DefaultMsgAcceptFunc will be used. // By default DefaultMsgAcceptFunc will be used.
MsgAcceptFunc MsgAcceptFunc MsgAcceptFunc MsgAcceptFunc
// UDP packet or TCP connection queue
queue chan *response
// Workers count
workersCount int32
// Shutdown handling // Shutdown handling
lock sync.RWMutex lock sync.RWMutex
started bool started bool
@ -240,51 +218,6 @@ func (srv *Server) isStarted() bool {
return started return started
} }
func (srv *Server) worker(w *response) {
srv.serve(w)
for {
count := atomic.LoadInt32(&srv.workersCount)
if count > maxIdleWorkersCount {
return
}
if atomic.CompareAndSwapInt32(&srv.workersCount, count, count+1) {
break
}
}
defer atomic.AddInt32(&srv.workersCount, -1)
inUse := false
timeout := time.NewTimer(idleWorkerTimeout)
defer timeout.Stop()
LOOP:
for {
select {
case w, ok := <-srv.queue:
if !ok {
break LOOP
}
inUse = true
srv.serve(w)
case <-timeout.C:
if !inUse {
break LOOP
}
inUse = false
timeout.Reset(idleWorkerTimeout)
}
}
}
func (srv *Server) spawnWorker(w *response) {
select {
case srv.queue <- w:
default:
go srv.worker(w)
}
}
func makeUDPBuffer(size int) func() interface{} { func makeUDPBuffer(size int) func() interface{} {
return func() interface{} { return func() interface{} {
return make([]byte, size) return make([]byte, size)
@ -292,8 +225,6 @@ func makeUDPBuffer(size int) func() interface{} {
} }
func (srv *Server) init() { func (srv *Server) init() {
srv.queue = make(chan *response)
srv.shutdown = make(chan struct{}) srv.shutdown = make(chan struct{})
srv.conns = make(map[net.Conn]struct{}) srv.conns = make(map[net.Conn]struct{})
@ -301,7 +232,10 @@ func (srv *Server) init() {
srv.UDPSize = MinMsgSize srv.UDPSize = MinMsgSize
} }
if srv.MsgAcceptFunc == nil { if srv.MsgAcceptFunc == nil {
srv.MsgAcceptFunc = defaultMsgAcceptFunc srv.MsgAcceptFunc = DefaultMsgAcceptFunc
}
if srv.Handler == nil {
srv.Handler = DefaultServeMux
} }
srv.udpPool.New = makeUDPBuffer(srv.UDPSize) srv.udpPool.New = makeUDPBuffer(srv.UDPSize)
@ -328,7 +262,6 @@ func (srv *Server) ListenAndServe() error {
} }
srv.init() srv.init()
defer close(srv.queue)
switch srv.Net { switch srv.Net {
case "tcp", "tcp4", "tcp6": case "tcp", "tcp4", "tcp6":
@ -383,7 +316,6 @@ func (srv *Server) ActivateAndServe() error {
} }
srv.init() srv.init()
defer close(srv.queue)
pConn := srv.PacketConn pConn := srv.PacketConn
l := srv.Listener l := srv.Listener
@ -463,11 +395,10 @@ var testShutdownNotify *sync.Cond
// getReadTimeout is a helper func to use system timeout if server did not intend to change it. // getReadTimeout is a helper func to use system timeout if server did not intend to change it.
func (srv *Server) getReadTimeout() time.Duration { func (srv *Server) getReadTimeout() time.Duration {
rtimeout := dnsTimeout
if srv.ReadTimeout != 0 { if srv.ReadTimeout != 0 {
rtimeout = srv.ReadTimeout return srv.ReadTimeout
} }
return rtimeout return dnsTimeout
} }
// serveTCP starts a TCP listener for the server. // serveTCP starts a TCP listener for the server.
@ -500,11 +431,7 @@ func (srv *Server) serveTCP(l net.Listener) error {
srv.conns[rw] = struct{}{} srv.conns[rw] = struct{}{}
srv.lock.Unlock() srv.lock.Unlock()
wg.Add(1) wg.Add(1)
srv.spawnWorker(&response{ go srv.serveTCPConn(&wg, rw)
tsigSecret: srv.TsigSecret,
tcp: rw,
wg: &wg,
})
} }
return nil return nil
@ -518,7 +445,7 @@ func (srv *Server) serveUDP(l *net.UDPConn) error {
srv.NotifyStartedFunc() srv.NotifyStartedFunc()
} }
reader := Reader(&defaultReader{srv}) reader := Reader(defaultReader{srv})
if srv.DecorateReader != nil { if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader) reader = srv.DecorateReader(reader)
} }
@ -549,46 +476,22 @@ func (srv *Server) serveUDP(l *net.UDPConn) error {
continue continue
} }
wg.Add(1) wg.Add(1)
srv.spawnWorker(&response{ go srv.serveUDPPacket(&wg, m, l, s)
msg: m,
tsigSecret: srv.TsigSecret,
udp: l,
udpSession: s,
wg: &wg,
})
} }
return nil return nil
} }
func (srv *Server) serve(w *response) { // Serve a new TCP connection.
func (srv *Server) serveTCPConn(wg *sync.WaitGroup, rw net.Conn) {
w := &response{tsigSecret: srv.TsigSecret, tcp: rw}
if srv.DecorateWriter != nil { if srv.DecorateWriter != nil {
w.writer = srv.DecorateWriter(w) w.writer = srv.DecorateWriter(w)
} else { } else {
w.writer = w w.writer = w
} }
if w.udp != nil { reader := Reader(defaultReader{srv})
// serve UDP
srv.serveDNS(w)
w.wg.Done()
return
}
defer func() {
if !w.hijacked {
w.Close()
}
srv.lock.Lock()
delete(srv.conns, w.tcp)
srv.lock.Unlock()
w.wg.Done()
}()
reader := Reader(&defaultReader{srv})
if srv.DecorateReader != nil { if srv.DecorateReader != nil {
reader = srv.DecorateReader(reader) reader = srv.DecorateReader(reader)
} }
@ -606,14 +509,13 @@ func (srv *Server) serve(w *response) {
} }
for q := 0; (q < limit || limit == -1) && srv.isStarted(); q++ { for q := 0; (q < limit || limit == -1) && srv.isStarted(); q++ {
var err error m, err := reader.ReadTCP(w.tcp, timeout)
w.msg, err = reader.ReadTCP(w.tcp, timeout)
if err != nil { if err != nil {
// TODO(tmthrgd): handle error // TODO(tmthrgd): handle error
break break
} }
srv.serveDNS(w) srv.serveDNS(m, w)
if w.tcp == nil { if w.closed {
break // Close() was called break // Close() was called
} }
if w.hijacked { if w.hijacked {
@ -623,17 +525,33 @@ func (srv *Server) serve(w *response) {
// idle timeout. // idle timeout.
timeout = idleTimeout timeout = idleTimeout
} }
}
func (srv *Server) disposeBuffer(w *response) { if !w.hijacked {
if w.udp != nil && cap(w.msg) == srv.UDPSize { w.Close()
srv.udpPool.Put(w.msg[:srv.UDPSize])
} }
w.msg = nil
srv.lock.Lock()
delete(srv.conns, w.tcp)
srv.lock.Unlock()
wg.Done()
} }
func (srv *Server) serveDNS(w *response) { // Serve a new UDP request.
dh, off, err := unpackMsgHdr(w.msg, 0) func (srv *Server) serveUDPPacket(wg *sync.WaitGroup, m []byte, u *net.UDPConn, s *SessionUDP) {
w := &response{tsigSecret: srv.TsigSecret, udp: u, udpSession: s}
if srv.DecorateWriter != nil {
w.writer = srv.DecorateWriter(w)
} else {
w.writer = w
}
srv.serveDNS(m, w)
wg.Done()
}
func (srv *Server) serveDNS(m []byte, w *response) {
dh, off, err := unpackMsgHdr(m, 0)
if err != nil { if err != nil {
// Let client hang, they are sending crap; any reply can be used to amplify. // Let client hang, they are sending crap; any reply can be used to amplify.
return return
@ -644,24 +562,23 @@ func (srv *Server) serveDNS(w *response) {
switch srv.MsgAcceptFunc(dh) { switch srv.MsgAcceptFunc(dh) {
case MsgAccept: case MsgAccept:
case MsgIgnore: if req.unpack(dh, m, off) == nil {
return break
}
fallthrough
case MsgReject: case MsgReject:
req.SetRcodeFormatError(req) req.SetRcodeFormatError(req)
// Are we allowed to delete any OPT records here? // Are we allowed to delete any OPT records here?
req.Ns, req.Answer, req.Extra = nil, nil, nil req.Ns, req.Answer, req.Extra = nil, nil, nil
w.WriteMsg(req) w.WriteMsg(req)
srv.disposeBuffer(w) fallthrough
return case MsgIgnore:
} if w.udp != nil && cap(m) == srv.UDPSize {
srv.udpPool.Put(m[:srv.UDPSize])
}
if err := req.unpack(dh, w.msg, off); err != nil {
req.SetRcodeFormatError(req)
req.Ns, req.Answer, req.Extra = nil, nil, nil
w.WriteMsg(req)
srv.disposeBuffer(w)
return return
} }
@ -669,7 +586,7 @@ func (srv *Server) serveDNS(w *response) {
if w.tsigSecret != nil { if w.tsigSecret != nil {
if t := req.IsTsig(); t != nil { if t := req.IsTsig(); t != nil {
if secret, ok := w.tsigSecret[t.Hdr.Name]; ok { if secret, ok := w.tsigSecret[t.Hdr.Name]; ok {
w.tsigStatus = TsigVerify(w.msg, secret, "", false) w.tsigStatus = TsigVerify(m, secret, "", false)
} else { } else {
w.tsigStatus = ErrSecret w.tsigStatus = ErrSecret
} }
@ -678,14 +595,11 @@ func (srv *Server) serveDNS(w *response) {
} }
} }
srv.disposeBuffer(w) if w.udp != nil && cap(m) == srv.UDPSize {
srv.udpPool.Put(m[:srv.UDPSize])
handler := srv.Handler
if handler == nil {
handler = DefaultServeMux
} }
handler.ServeDNS(w, req) // Writes back to the client srv.Handler.ServeDNS(w, req) // Writes back to the client
} }
func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) { func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error) {
@ -699,36 +613,16 @@ func (srv *Server) readTCP(conn net.Conn, timeout time.Duration) ([]byte, error)
} }
srv.lock.RUnlock() srv.lock.RUnlock()
l := make([]byte, 2) var length uint16
n, err := conn.Read(l) if err := binary.Read(conn, binary.BigEndian, &length); err != nil {
if err != nil || n != 2 { return nil, err
if err != nil {
return nil, err
}
return nil, ErrShortRead
} }
length := binary.BigEndian.Uint16(l)
if length == 0 { m := make([]byte, length)
return nil, ErrShortRead if _, err := io.ReadFull(conn, m); err != nil {
return nil, err
} }
m := make([]byte, int(length))
n, err = conn.Read(m[:int(length)])
if err != nil || n == 0 {
if err != nil {
return nil, err
}
return nil, ErrShortRead
}
i := n
for i < int(length) {
j, err := conn.Read(m[i:int(length)])
if err != nil {
return nil, err
}
i += j
}
n = i
m = m[:n]
return m, nil return m, nil
} }
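For reference, a sketch, separate from the vendored diff, of the 2-byte big-endian length prefix that the rewritten readTCP above consumes with binary.Read and io.ReadFull; the helper name is made up.

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// writeFramedMsg prefixes a packed DNS message with its 16-bit length,
// the framing DNS uses over TCP (RFC 1035, section 4.2.2). The rewritten
// response.Write below does the same with a net.Buffers write.
func writeFramedMsg(conn net.Conn, m []byte) error {
	if len(m) > 0xFFFF {
		return fmt.Errorf("message too large: %d bytes", len(m))
	}
	l := make([]byte, 2)
	binary.BigEndian.PutUint16(l, uint16(len(m)))
	_, err := (&net.Buffers{l, m}).WriteTo(conn)
	return err
}

func main() {
	// Shown for illustration only; writeFramedMsg would be called with a
	// connected net.Conn and a message packed by (*dns.Msg).Pack().
}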
@ -783,21 +677,16 @@ func (w *response) Write(m []byte) (int, error) {
switch { switch {
case w.udp != nil: case w.udp != nil:
n, err := WriteToSessionUDP(w.udp, m, w.udpSession) return WriteToSessionUDP(w.udp, m, w.udpSession)
return n, err
case w.tcp != nil: case w.tcp != nil:
lm := len(m) if len(m) > MaxMsgSize {
if lm < 2 {
return 0, io.ErrShortBuffer
}
if lm > MaxMsgSize {
return 0, &Error{err: "message too large"} return 0, &Error{err: "message too large"}
} }
l := make([]byte, 2, 2+lm)
binary.BigEndian.PutUint16(l, uint16(lm))
m = append(l, m...)
n, err := io.Copy(w.tcp, bytes.NewReader(m)) l := make([]byte, 2)
binary.BigEndian.PutUint16(l, uint16(len(m)))
n, err := (&net.Buffers{l, m}).WriteTo(w.tcp)
return int(n), err return int(n), err
default: default:
panic("dns: internal error: udp and tcp both nil") panic("dns: internal error: udp and tcp both nil")

vendor/github.com/miekg/dns/sig0.go generated vendored (26 lines changed)

@ -21,15 +21,11 @@ func (rr *SIG) Sign(k crypto.Signer, m *Msg) ([]byte, error) {
if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 { if rr.KeyTag == 0 || len(rr.SignerName) == 0 || rr.Algorithm == 0 {
return nil, ErrKey return nil, ErrKey
} }
rr.Header().Rrtype = TypeSIG
rr.Header().Class = ClassANY
rr.Header().Ttl = 0
rr.Header().Name = "."
rr.OrigTtl = 0
rr.TypeCovered = 0
rr.Labels = 0
buf := make([]byte, m.Len()+rr.len()) rr.Hdr = RR_Header{Name: ".", Rrtype: TypeSIG, Class: ClassANY, Ttl: 0}
rr.OrigTtl, rr.TypeCovered, rr.Labels = 0, 0, 0
buf := make([]byte, m.Len()+Len(rr))
mbuf, err := m.PackBuffer(buf) mbuf, err := m.PackBuffer(buf)
if err != nil { if err != nil {
return nil, err return nil, err
@ -107,7 +103,7 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
anc := binary.BigEndian.Uint16(buf[6:]) anc := binary.BigEndian.Uint16(buf[6:])
auc := binary.BigEndian.Uint16(buf[8:]) auc := binary.BigEndian.Uint16(buf[8:])
adc := binary.BigEndian.Uint16(buf[10:]) adc := binary.BigEndian.Uint16(buf[10:])
offset := 12 offset := headerSize
var err error var err error
for i := uint16(0); i < qdc && offset < buflen; i++ { for i := uint16(0); i < qdc && offset < buflen; i++ {
_, offset, err = UnpackDomainName(buf, offset) _, offset, err = UnpackDomainName(buf, offset)
@ -185,10 +181,8 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
case DSA: case DSA:
pk := k.publicKeyDSA() pk := k.publicKeyDSA()
sig = sig[1:] sig = sig[1:]
r := big.NewInt(0) r := new(big.Int).SetBytes(sig[:len(sig)/2])
r.SetBytes(sig[:len(sig)/2]) s := new(big.Int).SetBytes(sig[len(sig)/2:])
s := big.NewInt(0)
s.SetBytes(sig[len(sig)/2:])
if pk != nil { if pk != nil {
if dsa.Verify(pk, hashed, r, s) { if dsa.Verify(pk, hashed, r, s) {
return nil return nil
@ -202,10 +196,8 @@ func (rr *SIG) Verify(k *KEY, buf []byte) error {
} }
case ECDSAP256SHA256, ECDSAP384SHA384: case ECDSAP256SHA256, ECDSAP384SHA384:
pk := k.publicKeyECDSA() pk := k.publicKeyECDSA()
r := big.NewInt(0) r := new(big.Int).SetBytes(sig[:len(sig)/2])
r.SetBytes(sig[:len(sig)/2]) s := new(big.Int).SetBytes(sig[len(sig)/2:])
s := big.NewInt(0)
s.SetBytes(sig[len(sig)/2:])
if pk != nil { if pk != nil {
if ecdsa.Verify(pk, hashed, r, s) { if ecdsa.Verify(pk, hashed, r, s) {
return nil return nil


@ -23,6 +23,8 @@ type call struct {
type singleflight struct { type singleflight struct {
sync.Mutex // protects m sync.Mutex // protects m
m map[string]*call // lazily initialized m map[string]*call // lazily initialized
dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges
} }
// Do executes and returns the results of the given function, making // Do executes and returns the results of the given function, making
@ -49,9 +51,11 @@ func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v
c.val, c.rtt, c.err = fn() c.val, c.rtt, c.err = fn()
c.wg.Done() c.wg.Done()
g.Lock() if !g.dontDeleteForTesting {
delete(g.m, key) g.Lock()
g.Unlock() delete(g.m, key)
g.Unlock()
}
return c.val, c.rtt, c.err, c.dups > 0 return c.val, c.rtt, c.err, c.dups > 0
} }


@ -14,10 +14,7 @@ func (r *SMIMEA) Sign(usage, selector, matchingType int, cert *x509.Certificate)
r.MatchingType = uint8(matchingType) r.MatchingType = uint8(matchingType)
r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
if err != nil { return err
return err
}
return nil
} }
// Verify verifies a SMIMEA record against an SSL certificate. If it is OK // Verify verifies a SMIMEA record against an SSL certificate. If it is OK


@ -14,10 +14,7 @@ func (r *TLSA) Sign(usage, selector, matchingType int, cert *x509.Certificate) (
r.MatchingType = uint8(matchingType) r.MatchingType = uint8(matchingType)
r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert) r.Certificate, err = CertificateToDANE(r.Selector, r.MatchingType, cert)
if err != nil { return err
return err
}
return nil
} }
// Verify verifies a TLSA record against an SSL certificate. If it is OK // Verify verifies a TLSA record against an SSL certificate. If it is OK

vendor/github.com/miekg/dns/tsig.go generated vendored (21 lines changed)

@ -54,6 +54,10 @@ func (rr *TSIG) String() string {
return s return s
} }
func (rr *TSIG) parse(c *zlexer, origin, file string) *ParseError {
panic("dns: internal error: parse should never be called on TSIG")
}
// The following values must be put in wireformat, so that the MAC can be calculated. // The following values must be put in wireformat, so that the MAC can be calculated.
// RFC 2845, section 3.4.2. TSIG Variables. // RFC 2845, section 3.4.2. TSIG Variables.
type tsigWireFmt struct { type tsigWireFmt struct {
@ -113,13 +117,13 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s
var h hash.Hash var h hash.Hash
switch strings.ToLower(rr.Algorithm) { switch strings.ToLower(rr.Algorithm) {
case HmacMD5: case HmacMD5:
h = hmac.New(md5.New, []byte(rawsecret)) h = hmac.New(md5.New, rawsecret)
case HmacSHA1: case HmacSHA1:
h = hmac.New(sha1.New, []byte(rawsecret)) h = hmac.New(sha1.New, rawsecret)
case HmacSHA256: case HmacSHA256:
h = hmac.New(sha256.New, []byte(rawsecret)) h = hmac.New(sha256.New, rawsecret)
case HmacSHA512: case HmacSHA512:
h = hmac.New(sha512.New, []byte(rawsecret)) h = hmac.New(sha512.New, rawsecret)
default: default:
return nil, "", ErrKeyAlg return nil, "", ErrKeyAlg
} }
@ -133,13 +137,12 @@ func TsigGenerate(m *Msg, secret, requestMAC string, timersOnly bool) ([]byte, s
t.Algorithm = rr.Algorithm t.Algorithm = rr.Algorithm
t.OrigId = m.Id t.OrigId = m.Id
tbuf := make([]byte, t.len()) tbuf := make([]byte, Len(t))
if off, err := PackRR(t, tbuf, 0, nil, false); err == nil { off, err := PackRR(t, tbuf, 0, nil, false)
tbuf = tbuf[:off] // reset to actual size used if err != nil {
} else {
return nil, "", err return nil, "", err
} }
mbuf = append(mbuf, tbuf...) mbuf = append(mbuf, tbuf[:off]...)
// Update the ArCount directly in the buffer. // Update the ArCount directly in the buffer.
binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1)) binary.BigEndian.PutUint16(mbuf[10:], uint16(len(m.Extra)+1))
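A hedged client-side sketch, separate from the vendored diff, of where TsigGenerate and TsigVerify, adjusted above, come into play; the key name, base64 secret and server address are placeholders.

package main

import (
	"fmt"
	"time"

	"github.com/miekg/dns"
)

func main() {
	keyName := "tsig-key.example.org."        // fully qualified, lower case
	secret := "c2VjcmV0LXNlY3JldC1zZWNyZXQ=" // base64, placeholder only

	m := new(dns.Msg)
	m.SetQuestion("example.org.", dns.TypeSOA)
	m.SetTsig(keyName, dns.HmacSHA256, 300, time.Now().Unix())

	c := new(dns.Client)
	c.TsigSecret = map[string]string{keyName: secret}

	// Exchange signs the request and verifies the response using the
	// HMAC construction selected in TsigGenerate.
	in, _, err := c.Exchange(m, "192.0.2.1:53")
	if err != nil {
		fmt.Println("exchange failed:", err)
		return
	}
	fmt.Println(in)
}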

vendor/github.com/miekg/dns/types.go generated vendored (115 lines changed)

@ -205,9 +205,6 @@ var CertTypeToString = map[uint16]string{
CertOID: "OID", CertOID: "OID",
} }
// StringToCertType is the reverse of CertTypeToString.
var StringToCertType = reverseInt16(CertTypeToString)
//go:generate go run types_generate.go //go:generate go run types_generate.go
// Question holds a DNS question. There can be multiple questions in the // Question holds a DNS question. There can be multiple questions in the
@ -218,8 +215,10 @@ type Question struct {
Qclass uint16 Qclass uint16
} }
func (q *Question) len() int { func (q *Question) len(off int, compression map[string]struct{}) int {
return len(q.Name) + 1 + 2 + 2 l := domainNameLen(q.Name, off, compression, true)
l += 2 + 2
return l
} }
func (q *Question) String() (s string) { func (q *Question) String() (s string) {
@ -239,6 +238,25 @@ type ANY struct {
func (rr *ANY) String() string { return rr.Hdr.String() } func (rr *ANY) String() string { return rr.Hdr.String() }
func (rr *ANY) parse(c *zlexer, origin, file string) *ParseError {
panic("dns: internal error: parse should never be called on ANY")
}
// NULL RR. See RFC 1035.
type NULL struct {
Hdr RR_Header
Data string `dns:"any"`
}
func (rr *NULL) String() string {
// There is no presentation format; prefix string with a comment.
return ";" + rr.Hdr.String() + rr.Data
}
func (rr *NULL) parse(c *zlexer, origin, file string) *ParseError {
panic("dns: internal error: parse should never be called on NULL")
}
// CNAME RR. See RFC 1034. // CNAME RR. See RFC 1034.
type CNAME struct { type CNAME struct {
Hdr RR_Header Hdr RR_Header
@ -351,7 +369,7 @@ func (rr *X25) String() string {
type RT struct { type RT struct {
Hdr RR_Header Hdr RR_Header
Preference uint16 Preference uint16
Host string `dns:"cdomain-name"` Host string `dns:"domain-name"` // RFC 3597 prohibits compressing records not defined in RFC 1035.
} }
func (rr *RT) String() string { func (rr *RT) String() string {
@ -386,7 +404,7 @@ type RP struct {
} }
func (rr *RP) String() string { func (rr *RP) String() string {
return rr.Hdr.String() + rr.Mbox + " " + sprintTxt([]string{rr.Txt}) return rr.Hdr.String() + sprintName(rr.Mbox) + " " + sprintName(rr.Txt)
} }
// SOA RR. See RFC 1035. // SOA RR. See RFC 1035.
@ -460,7 +478,7 @@ func sprintTxtOctet(s string) string {
case b == '.': case b == '.':
dst.WriteByte('.') dst.WriteByte('.')
case b < ' ' || b > '~': case b < ' ' || b > '~':
writeEscapedByte(&dst, b) dst.WriteString(escapeByte(b))
default: default:
dst.WriteByte(b) dst.WriteByte(b)
} }
@ -508,20 +526,44 @@ func writeTXTStringByte(s *strings.Builder, b byte) {
s.WriteByte('\\') s.WriteByte('\\')
s.WriteByte(b) s.WriteByte(b)
case b < ' ' || b > '~': case b < ' ' || b > '~':
writeEscapedByte(s, b) s.WriteString(escapeByte(b))
default: default:
s.WriteByte(b) s.WriteByte(b)
} }
} }
func writeEscapedByte(s *strings.Builder, b byte) { const (
var buf [3]byte escapedByteSmall = "" +
bufs := strconv.AppendInt(buf[:0], int64(b), 10) `\000\001\002\003\004\005\006\007\008\009` +
s.WriteByte('\\') `\010\011\012\013\014\015\016\017\018\019` +
for i := len(bufs); i < 3; i++ { `\020\021\022\023\024\025\026\027\028\029` +
s.WriteByte('0') `\030\031`
escapedByteLarge = `\127\128\129` +
`\130\131\132\133\134\135\136\137\138\139` +
`\140\141\142\143\144\145\146\147\148\149` +
`\150\151\152\153\154\155\156\157\158\159` +
`\160\161\162\163\164\165\166\167\168\169` +
`\170\171\172\173\174\175\176\177\178\179` +
`\180\181\182\183\184\185\186\187\188\189` +
`\190\191\192\193\194\195\196\197\198\199` +
`\200\201\202\203\204\205\206\207\208\209` +
`\210\211\212\213\214\215\216\217\218\219` +
`\220\221\222\223\224\225\226\227\228\229` +
`\230\231\232\233\234\235\236\237\238\239` +
`\240\241\242\243\244\245\246\247\248\249` +
`\250\251\252\253\254\255`
)
// escapeByte returns the \DDD escaping of b which must
// satisfy b < ' ' || b > '~'.
func escapeByte(b byte) string {
if b < ' ' {
return escapedByteSmall[b*4 : b*4+4]
} }
s.Write(bufs)
b -= '~' + 1
// The cast here is needed as b*4 may overflow byte.
return escapedByteLarge[int(b)*4 : int(b)*4+4]
} }
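Note: the new escapeByte replaces per-call formatting with slices of a precomputed constant. A table-free sketch of the same \DDD escaping, useful as a cross-check and not part of the library:

// Sketch: the slow equivalent of \DDD escaping for bytes outside the
// printable ASCII range (b < ' ' || b > '~').
package main

import "fmt"

func escapeByteSlow(b byte) string {
	return fmt.Sprintf(`\%03d`, b)
}

func main() {
	for _, b := range []byte{0, 9, 31, 127, 255} {
		fmt.Println(escapeByteSlow(b)) // \000 \009 \031 \127 \255
	}
}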
func nextByte(s string, offset int) (byte, int) { func nextByte(s string, offset int) (byte, int) {
@ -803,14 +845,15 @@ type NSEC struct {
func (rr *NSEC) String() string { func (rr *NSEC) String() string {
s := rr.Hdr.String() + sprintName(rr.NextDomain) s := rr.Hdr.String() + sprintName(rr.NextDomain)
for i := 0; i < len(rr.TypeBitMap); i++ { for _, t := range rr.TypeBitMap {
s += " " + Type(rr.TypeBitMap[i]).String() s += " " + Type(t).String()
} }
return s return s
} }
func (rr *NSEC) len() int { func (rr *NSEC) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() + len(rr.NextDomain) + 1 l := rr.Hdr.len(off, compression)
l += domainNameLen(rr.NextDomain, off+l, compression, false)
lastwindow := uint32(2 ^ 32 + 1) lastwindow := uint32(2 ^ 32 + 1)
for _, t := range rr.TypeBitMap { for _, t := range rr.TypeBitMap {
window := t / 256 window := t / 256
@ -968,14 +1011,15 @@ func (rr *NSEC3) String() string {
" " + strconv.Itoa(int(rr.Iterations)) + " " + strconv.Itoa(int(rr.Iterations)) +
" " + saltToString(rr.Salt) + " " + saltToString(rr.Salt) +
" " + rr.NextDomain " " + rr.NextDomain
for i := 0; i < len(rr.TypeBitMap); i++ { for _, t := range rr.TypeBitMap {
s += " " + Type(rr.TypeBitMap[i]).String() s += " " + Type(t).String()
} }
return s return s
} }
func (rr *NSEC3) len() int { func (rr *NSEC3) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() + 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1 l := rr.Hdr.len(off, compression)
l += 6 + len(rr.Salt)/2 + 1 + len(rr.NextDomain) + 1
lastwindow := uint32(2 ^ 32 + 1) lastwindow := uint32(2 ^ 32 + 1)
for _, t := range rr.TypeBitMap { for _, t := range rr.TypeBitMap {
window := t / 256 window := t / 256
@ -1022,10 +1066,16 @@ type TKEY struct {
// TKEY has no official presentation format, but this will suffice. // TKEY has no official presentation format, but this will suffice.
func (rr *TKEY) String() string { func (rr *TKEY) String() string {
s := "\n;; TKEY PSEUDOSECTION:\n" s := ";" + rr.Hdr.String() +
s += rr.Hdr.String() + " " + rr.Algorithm + " " + " " + rr.Algorithm +
strconv.Itoa(int(rr.KeySize)) + " " + rr.Key + " " + " " + TimeToString(rr.Inception) +
strconv.Itoa(int(rr.OtherLen)) + " " + rr.OtherData " " + TimeToString(rr.Expiration) +
" " + strconv.Itoa(int(rr.Mode)) +
" " + strconv.Itoa(int(rr.Error)) +
" " + strconv.Itoa(int(rr.KeySize)) +
" " + rr.Key +
" " + strconv.Itoa(int(rr.OtherLen)) +
" " + rr.OtherData
return s return s
} }
@ -1285,14 +1335,15 @@ type CSYNC struct {
func (rr *CSYNC) String() string { func (rr *CSYNC) String() string {
s := rr.Hdr.String() + strconv.FormatInt(int64(rr.Serial), 10) + " " + strconv.Itoa(int(rr.Flags)) s := rr.Hdr.String() + strconv.FormatInt(int64(rr.Serial), 10) + " " + strconv.Itoa(int(rr.Flags))
for i := 0; i < len(rr.TypeBitMap); i++ { for _, t := range rr.TypeBitMap {
s += " " + Type(rr.TypeBitMap[i]).String() s += " " + Type(t).String()
} }
return s return s
} }
func (rr *CSYNC) len() int { func (rr *CSYNC) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() + 4 + 2 l := rr.Hdr.len(off, compression)
l += 4 + 2
lastwindow := uint32(2 ^ 32 + 1) lastwindow := uint32(2 ^ 32 + 1)
for _, t := range rr.TypeBitMap { for _, t := range rr.TypeBitMap {
window := t / 256 window := t / 256


@ -153,8 +153,8 @@ func main() {
if isEmbedded { if isEmbedded {
continue continue
} }
fmt.Fprintf(b, "func (rr *%s) len() int {\n", name) fmt.Fprintf(b, "func (rr *%s) len(off int, compression map[string]struct{}) int {\n", name)
fmt.Fprintf(b, "l := rr.Hdr.len()\n") fmt.Fprintf(b, "l := rr.Hdr.len(off, compression)\n")
for i := 1; i < st.NumFields(); i++ { for i := 1; i < st.NumFields(); i++ {
o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) } o := func(s string) { fmt.Fprintf(b, s, st.Field(i).Name()) }
@ -162,7 +162,11 @@ func main() {
switch st.Tag(i) { switch st.Tag(i) {
case `dns:"-"`: case `dns:"-"`:
// ignored // ignored
case `dns:"cdomain-name"`, `dns:"domain-name"`, `dns:"txt"`: case `dns:"cdomain-name"`:
o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, true) }\n")
case `dns:"domain-name"`:
o("for _, x := range rr.%s { l += domainNameLen(x, off+l, compression, false) }\n")
case `dns:"txt"`:
o("for _, x := range rr.%s { l += len(x) + 1 }\n") o("for _, x := range rr.%s { l += len(x) + 1 }\n")
default: default:
log.Fatalln(name, st.Field(i).Name(), st.Tag(i)) log.Fatalln(name, st.Field(i).Name(), st.Tag(i))
@ -173,8 +177,10 @@ func main() {
switch { switch {
case st.Tag(i) == `dns:"-"`: case st.Tag(i) == `dns:"-"`:
// ignored // ignored
case st.Tag(i) == `dns:"cdomain-name"`, st.Tag(i) == `dns:"domain-name"`: case st.Tag(i) == `dns:"cdomain-name"`:
o("l += len(rr.%s) + 1\n") o("l += domainNameLen(rr.%s, off+l, compression, true)\n")
case st.Tag(i) == `dns:"domain-name"`:
o("l += domainNameLen(rr.%s, off+l, compression, false)\n")
case st.Tag(i) == `dns:"octet"`: case st.Tag(i) == `dns:"octet"`:
o("l += len(rr.%s)\n") o("l += len(rr.%s)\n")
case strings.HasPrefix(st.Tag(i), `dns:"size-base64`): case strings.HasPrefix(st.Tag(i), `dns:"size-base64`):
@ -187,10 +193,12 @@ func main() {
fallthrough fallthrough
case st.Tag(i) == `dns:"hex"`: case st.Tag(i) == `dns:"hex"`:
o("l += len(rr.%s)/2 + 1\n") o("l += len(rr.%s)/2 + 1\n")
case st.Tag(i) == `dns:"any"`:
o("l += len(rr.%s)\n")
case st.Tag(i) == `dns:"a"`: case st.Tag(i) == `dns:"a"`:
o("l += net.IPv4len // %s\n") o("if len(rr.%s) != 0 { l += net.IPv4len }\n")
case st.Tag(i) == `dns:"aaaa"`: case st.Tag(i) == `dns:"aaaa"`:
o("l += net.IPv6len // %s\n") o("if len(rr.%s) != 0 { l += net.IPv6len }\n")
case st.Tag(i) == `dns:"txt"`: case st.Tag(i) == `dns:"txt"`:
o("for _, t := range rr.%s { l += len(t) + 1 }\n") o("for _, t := range rr.%s { l += len(t) + 1 }\n")
case st.Tag(i) == `dns:"uint48"`: case st.Tag(i) == `dns:"uint48"`:
@ -236,6 +244,13 @@ func main() {
splits := strings.Split(t, ".") splits := strings.Split(t, ".")
t = splits[len(splits)-1] t = splits[len(splits)-1]
} }
// For the EDNS0 interface (used in the OPT RR), we need to call the copy method on each element.
if t == "EDNS0" {
fmt.Fprintf(b, "%s := make([]%s, len(rr.%s));\nfor i,e := range rr.%s {\n %s[i] = e.copy()\n}\n",
f, t, f, f, f)
fields = append(fields, f)
continue
}
fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n", fmt.Fprintf(b, "%s := make([]%s, len(rr.%s)); copy(%s, rr.%s)\n",
f, t, f, f, f) f, t, f, f, f)
fields = append(fields, f) fields = append(fields, f)
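Note: the generator now emits len(off, compression) methods, and callers reach this through the exported helper the tsig.go hunk already uses. A short sketch of that caller-side view, assuming the github.com/miekg/dns version vendored by this change:

// Sketch: computing an RR's uncompressed wire length via dns.Len.
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("www.example.org. 3600 IN MX 10 mail.example.org.")
	if err != nil {
		panic(err)
	}
	fmt.Println(dns.Len(rr)) // upper bound on the record's size in a message
}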


@ -20,15 +20,13 @@ func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) {
if err != nil { if err != nil {
return n, nil, err return n, nil, err
} }
session := &SessionUDP{raddr.(*net.UDPAddr)} return n, &SessionUDP{raddr.(*net.UDPAddr)}, err
return n, session, err
} }
// WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. // WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr.
// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP. // TODO(fastest963): Once go1.10 is released, use WriteMsgUDP.
func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) {
n, err := conn.WriteTo(b, session.raddr) return conn.WriteTo(b, session.raddr)
return n, err
} }
// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods // TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods


@ -44,7 +44,8 @@ func (u *Msg) RRsetUsed(rr []RR) {
u.Answer = make([]RR, 0, len(rr)) u.Answer = make([]RR, 0, len(rr))
} }
for _, r := range rr { for _, r := range rr {
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) h := r.Header()
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}})
} }
} }
@ -55,7 +56,8 @@ func (u *Msg) RRsetNotUsed(rr []RR) {
u.Answer = make([]RR, 0, len(rr)) u.Answer = make([]RR, 0, len(rr))
} }
for _, r := range rr { for _, r := range rr {
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassNONE}}) h := r.Header()
u.Answer = append(u.Answer, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassNONE}})
} }
} }
@ -79,7 +81,8 @@ func (u *Msg) RemoveRRset(rr []RR) {
u.Ns = make([]RR, 0, len(rr)) u.Ns = make([]RR, 0, len(rr))
} }
for _, r := range rr { for _, r := range rr {
u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: r.Header().Name, Ttl: 0, Rrtype: r.Header().Rrtype, Class: ClassANY}}) h := r.Header()
u.Ns = append(u.Ns, &ANY{Hdr: RR_Header{Name: h.Name, Ttl: 0, Rrtype: h.Rrtype, Class: ClassANY}})
} }
} }
@ -99,8 +102,9 @@ func (u *Msg) Remove(rr []RR) {
u.Ns = make([]RR, 0, len(rr)) u.Ns = make([]RR, 0, len(rr))
} }
for _, r := range rr { for _, r := range rr {
r.Header().Class = ClassNONE h := r.Header()
r.Header().Ttl = 0 h.Class = ClassNONE
h.Ttl = 0
u.Ns = append(u.Ns, r) u.Ns = append(u.Ns, r)
} }
} }
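Note: these helpers build the ANY/NONE-class pseudo records used in RFC 2136 dynamic updates. A sketch of how a caller exercises them through Msg's public API; zone and record values are placeholders:

// Sketch: building a dynamic update that uses the helpers changed above.
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	rr, err := dns.NewRR("host.example.org. 300 IN A 192.0.2.10")
	if err != nil {
		panic(err)
	}

	m := new(dns.Msg)
	m.SetUpdate("example.org.")
	m.RRsetUsed([]dns.RR{rr})   // prerequisite: the RRset must already exist
	m.RemoveRRset([]dns.RR{rr}) // update: delete the whole RRset

	fmt.Println(m.String())
}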


@ -3,7 +3,7 @@ package dns
import "fmt" import "fmt"
// Version is current version of this library. // Version is current version of this library.
var Version = V{1, 1, 0} var Version = V{1, 1, 9}
// V holds the version of this library. // V holds the version of this library.
type V struct { type V struct {

vendor/github.com/miekg/dns/xfr.go generated vendored

@ -35,30 +35,36 @@ type Transfer struct {
// channel, err := transfer.In(message, master) // channel, err := transfer.In(message, master)
// //
func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) {
switch q.Question[0].Qtype {
case TypeAXFR, TypeIXFR:
default:
return nil, &Error{"unsupported question type"}
}
timeout := dnsTimeout timeout := dnsTimeout
if t.DialTimeout != 0 { if t.DialTimeout != 0 {
timeout = t.DialTimeout timeout = t.DialTimeout
} }
if t.Conn == nil { if t.Conn == nil {
t.Conn, err = DialTimeout("tcp", a, timeout) t.Conn, err = DialTimeout("tcp", a, timeout)
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
if err := t.WriteMsg(q); err != nil { if err := t.WriteMsg(q); err != nil {
return nil, err return nil, err
} }
env = make(chan *Envelope) env = make(chan *Envelope)
go func() { switch q.Question[0].Qtype {
if q.Question[0].Qtype == TypeAXFR { case TypeAXFR:
go t.inAxfr(q, env) go t.inAxfr(q, env)
return case TypeIXFR:
} go t.inIxfr(q, env)
if q.Question[0].Qtype == TypeIXFR { }
go t.inIxfr(q, env)
return
}
}()
return env, nil return env, nil
} }
@ -111,7 +117,7 @@ func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) {
} }
func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) {
serial := uint32(0) // The first serial seen is the current server serial var serial uint32 // The first serial seen is the current server serial
axfr := true axfr := true
n := 0 n := 0
qser := q.Ns[0].(*SOA).Serial qser := q.Ns[0].(*SOA).Serial
@ -237,24 +243,18 @@ func (t *Transfer) WriteMsg(m *Msg) (err error) {
if err != nil { if err != nil {
return err return err
} }
if _, err = t.Write(out); err != nil { _, err = t.Write(out)
return err return err
}
return nil
} }
func isSOAFirst(in *Msg) bool { func isSOAFirst(in *Msg) bool {
if len(in.Answer) > 0 { return len(in.Answer) > 0 &&
return in.Answer[0].Header().Rrtype == TypeSOA in.Answer[0].Header().Rrtype == TypeSOA
}
return false
} }
func isSOALast(in *Msg) bool { func isSOALast(in *Msg) bool {
if len(in.Answer) > 0 { return len(in.Answer) > 0 &&
return in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA in.Answer[len(in.Answer)-1].Header().Rrtype == TypeSOA
}
return false
} }
const errXFR = "bad xfr rcode: %d" const errXFR = "bad xfr rcode: %d"
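Note: Transfer.In now rejects non-AXFR/IXFR questions up front instead of silently doing nothing. A caller-side sketch of driving the reworked function; the server address and zone name are placeholders:

// Sketch: requesting a zone transfer through Transfer.In.
package main

import (
	"fmt"
	"log"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetAxfr("example.org.")

	t := new(dns.Transfer)
	env, err := t.In(m, "ns1.example.org:53") // errors early for unsupported question types
	if err != nil {
		log.Fatal(err)
	}
	for e := range env {
		if e.Error != nil {
			log.Fatal(e.Error)
		}
		for _, rr := range e.RR {
			fmt.Println(rr)
		}
	}
}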


@ -1,152 +0,0 @@
// Code generated by "go run compress_generate.go"; DO NOT EDIT.
package dns
func compressionLenHelperType(c map[string]struct{}, r RR, initLen int) int {
currentLen := initLen
switch x := r.(type) {
case *AFSDB:
currentLen -= len(x.Hostname) + 1
currentLen += compressionLenHelper(c, x.Hostname, currentLen)
case *CNAME:
currentLen -= len(x.Target) + 1
currentLen += compressionLenHelper(c, x.Target, currentLen)
case *DNAME:
currentLen -= len(x.Target) + 1
currentLen += compressionLenHelper(c, x.Target, currentLen)
case *HIP:
for i := range x.RendezvousServers {
currentLen -= len(x.RendezvousServers[i]) + 1
}
for i := range x.RendezvousServers {
currentLen += compressionLenHelper(c, x.RendezvousServers[i], currentLen)
}
case *KX:
currentLen -= len(x.Exchanger) + 1
currentLen += compressionLenHelper(c, x.Exchanger, currentLen)
case *LP:
currentLen -= len(x.Fqdn) + 1
currentLen += compressionLenHelper(c, x.Fqdn, currentLen)
case *MB:
currentLen -= len(x.Mb) + 1
currentLen += compressionLenHelper(c, x.Mb, currentLen)
case *MD:
currentLen -= len(x.Md) + 1
currentLen += compressionLenHelper(c, x.Md, currentLen)
case *MF:
currentLen -= len(x.Mf) + 1
currentLen += compressionLenHelper(c, x.Mf, currentLen)
case *MG:
currentLen -= len(x.Mg) + 1
currentLen += compressionLenHelper(c, x.Mg, currentLen)
case *MINFO:
currentLen -= len(x.Rmail) + 1
currentLen += compressionLenHelper(c, x.Rmail, currentLen)
currentLen -= len(x.Email) + 1
currentLen += compressionLenHelper(c, x.Email, currentLen)
case *MR:
currentLen -= len(x.Mr) + 1
currentLen += compressionLenHelper(c, x.Mr, currentLen)
case *MX:
currentLen -= len(x.Mx) + 1
currentLen += compressionLenHelper(c, x.Mx, currentLen)
case *NAPTR:
currentLen -= len(x.Replacement) + 1
currentLen += compressionLenHelper(c, x.Replacement, currentLen)
case *NS:
currentLen -= len(x.Ns) + 1
currentLen += compressionLenHelper(c, x.Ns, currentLen)
case *NSAPPTR:
currentLen -= len(x.Ptr) + 1
currentLen += compressionLenHelper(c, x.Ptr, currentLen)
case *NSEC:
currentLen -= len(x.NextDomain) + 1
currentLen += compressionLenHelper(c, x.NextDomain, currentLen)
case *PTR:
currentLen -= len(x.Ptr) + 1
currentLen += compressionLenHelper(c, x.Ptr, currentLen)
case *PX:
currentLen -= len(x.Map822) + 1
currentLen += compressionLenHelper(c, x.Map822, currentLen)
currentLen -= len(x.Mapx400) + 1
currentLen += compressionLenHelper(c, x.Mapx400, currentLen)
case *RP:
currentLen -= len(x.Mbox) + 1
currentLen += compressionLenHelper(c, x.Mbox, currentLen)
currentLen -= len(x.Txt) + 1
currentLen += compressionLenHelper(c, x.Txt, currentLen)
case *RRSIG:
currentLen -= len(x.SignerName) + 1
currentLen += compressionLenHelper(c, x.SignerName, currentLen)
case *RT:
currentLen -= len(x.Host) + 1
currentLen += compressionLenHelper(c, x.Host, currentLen)
case *SIG:
currentLen -= len(x.SignerName) + 1
currentLen += compressionLenHelper(c, x.SignerName, currentLen)
case *SOA:
currentLen -= len(x.Ns) + 1
currentLen += compressionLenHelper(c, x.Ns, currentLen)
currentLen -= len(x.Mbox) + 1
currentLen += compressionLenHelper(c, x.Mbox, currentLen)
case *SRV:
currentLen -= len(x.Target) + 1
currentLen += compressionLenHelper(c, x.Target, currentLen)
case *TALINK:
currentLen -= len(x.PreviousName) + 1
currentLen += compressionLenHelper(c, x.PreviousName, currentLen)
currentLen -= len(x.NextName) + 1
currentLen += compressionLenHelper(c, x.NextName, currentLen)
case *TKEY:
currentLen -= len(x.Algorithm) + 1
currentLen += compressionLenHelper(c, x.Algorithm, currentLen)
case *TSIG:
currentLen -= len(x.Algorithm) + 1
currentLen += compressionLenHelper(c, x.Algorithm, currentLen)
}
return currentLen - initLen
}
func compressionLenSearchType(c map[string]struct{}, r RR) (int, bool, int) {
switch x := r.(type) {
case *CNAME:
k1, ok1, sz1 := compressionLenSearch(c, x.Target)
return k1, ok1, sz1
case *MB:
k1, ok1, sz1 := compressionLenSearch(c, x.Mb)
return k1, ok1, sz1
case *MD:
k1, ok1, sz1 := compressionLenSearch(c, x.Md)
return k1, ok1, sz1
case *MF:
k1, ok1, sz1 := compressionLenSearch(c, x.Mf)
return k1, ok1, sz1
case *MG:
k1, ok1, sz1 := compressionLenSearch(c, x.Mg)
return k1, ok1, sz1
case *MINFO:
k1, ok1, sz1 := compressionLenSearch(c, x.Rmail)
k2, ok2, sz2 := compressionLenSearch(c, x.Email)
return k1 + k2, ok1 && ok2, sz1 + sz2
case *MR:
k1, ok1, sz1 := compressionLenSearch(c, x.Mr)
return k1, ok1, sz1
case *MX:
k1, ok1, sz1 := compressionLenSearch(c, x.Mx)
return k1, ok1, sz1
case *NS:
k1, ok1, sz1 := compressionLenSearch(c, x.Ns)
return k1, ok1, sz1
case *PTR:
k1, ok1, sz1 := compressionLenSearch(c, x.Ptr)
return k1, ok1, sz1
case *RT:
k1, ok1, sz1 := compressionLenSearch(c, x.Host)
return k1, ok1, sz1
case *SOA:
k1, ok1, sz1 := compressionLenSearch(c, x.Ns)
k2, ok2, sz2 := compressionLenSearch(c, x.Mbox)
return k1 + k2, ok1 && ok2, sz1 + sz2
}
return 0, false, 0
}
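Note: the generated compression helpers above are deleted because length accounting now lives in the per-RR len(off, compression) methods. From outside the package the effect is only visible through Msg.Len with compression toggled, as in this sketch (names are placeholders):

// Sketch: compression-aware sizing observed through the public API.
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	m := new(dns.Msg)
	m.SetQuestion("www.example.org.", dns.TypeA)
	rr, err := dns.NewRR("www.example.org. 300 IN CNAME web.example.org.")
	if err != nil {
		panic(err)
	}
	m.Answer = append(m.Answer, rr)

	m.Compress = false
	fmt.Println("uncompressed:", m.Len())
	m.Compress = true
	fmt.Println("compressed:  ", m.Len()) // smaller once names repeat
}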

File diff suppressed because it is too large.

vendor/github.com/miekg/dns/zmsg.go generated vendored
File diff suppressed because it is too large.

vendor/github.com/miekg/dns/ztypes.go generated vendored

@ -54,6 +54,7 @@ var TypeToRR = map[uint16]func() RR{
TypeNSEC: func() RR { return new(NSEC) }, TypeNSEC: func() RR { return new(NSEC) },
TypeNSEC3: func() RR { return new(NSEC3) }, TypeNSEC3: func() RR { return new(NSEC3) },
TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) },
TypeNULL: func() RR { return new(NULL) },
TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) },
TypeOPT: func() RR { return new(OPT) }, TypeOPT: func() RR { return new(OPT) },
TypePTR: func() RR { return new(PTR) }, TypePTR: func() RR { return new(PTR) },
@ -209,6 +210,7 @@ func (rr *NSAPPTR) Header() *RR_Header { return &rr.Hdr }
func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC) Header() *RR_Header { return &rr.Hdr }
func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr }
func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr }
func (rr *NULL) Header() *RR_Header { return &rr.Hdr }
func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr }
func (rr *OPT) Header() *RR_Header { return &rr.Hdr } func (rr *OPT) Header() *RR_Header { return &rr.Hdr }
func (rr *PTR) Header() *RR_Header { return &rr.Hdr } func (rr *PTR) Header() *RR_Header { return &rr.Hdr }
@ -236,144 +238,150 @@ func (rr *URI) Header() *RR_Header { return &rr.Hdr }
func (rr *X25) Header() *RR_Header { return &rr.Hdr } func (rr *X25) Header() *RR_Header { return &rr.Hdr }
// len() functions // len() functions
func (rr *A) len() int { func (rr *A) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += net.IPv4len // A if len(rr.A) != 0 {
l += net.IPv4len
}
return l return l
} }
func (rr *AAAA) len() int { func (rr *AAAA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += net.IPv6len // AAAA if len(rr.AAAA) != 0 {
l += net.IPv6len
}
return l return l
} }
func (rr *AFSDB) len() int { func (rr *AFSDB) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Subtype l += 2 // Subtype
l += len(rr.Hostname) + 1 l += domainNameLen(rr.Hostname, off+l, compression, false)
return l return l
} }
func (rr *ANY) len() int { func (rr *ANY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
return l return l
} }
func (rr *AVC) len() int { func (rr *AVC) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
for _, x := range rr.Txt { for _, x := range rr.Txt {
l += len(x) + 1 l += len(x) + 1
} }
return l return l
} }
func (rr *CAA) len() int { func (rr *CAA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l++ // Flag l++ // Flag
l += len(rr.Tag) + 1 l += len(rr.Tag) + 1
l += len(rr.Value) l += len(rr.Value)
return l return l
} }
func (rr *CERT) len() int { func (rr *CERT) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Type l += 2 // Type
l += 2 // KeyTag l += 2 // KeyTag
l++ // Algorithm l++ // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) l += base64.StdEncoding.DecodedLen(len(rr.Certificate))
return l return l
} }
func (rr *CNAME) len() int { func (rr *CNAME) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Target) + 1 l += domainNameLen(rr.Target, off+l, compression, true)
return l return l
} }
func (rr *DHCID) len() int { func (rr *DHCID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += base64.StdEncoding.DecodedLen(len(rr.Digest)) l += base64.StdEncoding.DecodedLen(len(rr.Digest))
return l return l
} }
func (rr *DNAME) len() int { func (rr *DNAME) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Target) + 1 l += domainNameLen(rr.Target, off+l, compression, false)
return l return l
} }
func (rr *DNSKEY) len() int { func (rr *DNSKEY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Flags l += 2 // Flags
l++ // Protocol l++ // Protocol
l++ // Algorithm l++ // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l return l
} }
func (rr *DS) len() int { func (rr *DS) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // KeyTag l += 2 // KeyTag
l++ // Algorithm l++ // Algorithm
l++ // DigestType l++ // DigestType
l += len(rr.Digest)/2 + 1 l += len(rr.Digest)/2 + 1
return l return l
} }
func (rr *EID) len() int { func (rr *EID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Endpoint)/2 + 1 l += len(rr.Endpoint)/2 + 1
return l return l
} }
func (rr *EUI48) len() int { func (rr *EUI48) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 6 // Address l += 6 // Address
return l return l
} }
func (rr *EUI64) len() int { func (rr *EUI64) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 8 // Address l += 8 // Address
return l return l
} }
func (rr *GID) len() int { func (rr *GID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 4 // Gid l += 4 // Gid
return l return l
} }
func (rr *GPOS) len() int { func (rr *GPOS) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Longitude) + 1 l += len(rr.Longitude) + 1
l += len(rr.Latitude) + 1 l += len(rr.Latitude) + 1
l += len(rr.Altitude) + 1 l += len(rr.Altitude) + 1
return l return l
} }
func (rr *HINFO) len() int { func (rr *HINFO) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Cpu) + 1 l += len(rr.Cpu) + 1
l += len(rr.Os) + 1 l += len(rr.Os) + 1
return l return l
} }
func (rr *HIP) len() int { func (rr *HIP) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l++ // HitLength l++ // HitLength
l++ // PublicKeyAlgorithm l++ // PublicKeyAlgorithm
l += 2 // PublicKeyLength l += 2 // PublicKeyLength
l += len(rr.Hit) / 2 l += len(rr.Hit) / 2
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
for _, x := range rr.RendezvousServers { for _, x := range rr.RendezvousServers {
l += len(x) + 1 l += domainNameLen(x, off+l, compression, false)
} }
return l return l
} }
func (rr *KX) len() int { func (rr *KX) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Preference l += 2 // Preference
l += len(rr.Exchanger) + 1 l += domainNameLen(rr.Exchanger, off+l, compression, false)
return l return l
} }
func (rr *L32) len() int { func (rr *L32) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Preference l += 2 // Preference
l += net.IPv4len // Locator32 if len(rr.Locator32) != 0 {
l += net.IPv4len
}
return l return l
} }
func (rr *L64) len() int { func (rr *L64) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Preference l += 2 // Preference
l += 8 // Locator64 l += 8 // Locator64
return l return l
} }
func (rr *LOC) len() int { func (rr *LOC) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l++ // Version l++ // Version
l++ // Size l++ // Size
l++ // HorizPre l++ // HorizPre
@ -383,89 +391,89 @@ func (rr *LOC) len() int {
l += 4 // Altitude l += 4 // Altitude
return l return l
} }
func (rr *LP) len() int { func (rr *LP) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Preference l += 2 // Preference
l += len(rr.Fqdn) + 1 l += domainNameLen(rr.Fqdn, off+l, compression, false)
return l return l
} }
func (rr *MB) len() int { func (rr *MB) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Mb) + 1 l += domainNameLen(rr.Mb, off+l, compression, true)
return l return l
} }
func (rr *MD) len() int { func (rr *MD) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Md) + 1 l += domainNameLen(rr.Md, off+l, compression, true)
return l return l
} }
func (rr *MF) len() int { func (rr *MF) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Mf) + 1 l += domainNameLen(rr.Mf, off+l, compression, true)
return l return l
} }
func (rr *MG) len() int { func (rr *MG) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Mg) + 1 l += domainNameLen(rr.Mg, off+l, compression, true)
return l return l
} }
func (rr *MINFO) len() int { func (rr *MINFO) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Rmail) + 1 l += domainNameLen(rr.Rmail, off+l, compression, true)
l += len(rr.Email) + 1 l += domainNameLen(rr.Email, off+l, compression, true)
return l return l
} }
func (rr *MR) len() int { func (rr *MR) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Mr) + 1 l += domainNameLen(rr.Mr, off+l, compression, true)
return l return l
} }
func (rr *MX) len() int { func (rr *MX) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Preference l += 2 // Preference
l += len(rr.Mx) + 1 l += domainNameLen(rr.Mx, off+l, compression, true)
return l return l
} }
func (rr *NAPTR) len() int { func (rr *NAPTR) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Order l += 2 // Order
l += 2 // Preference l += 2 // Preference
l += len(rr.Flags) + 1 l += len(rr.Flags) + 1
l += len(rr.Service) + 1 l += len(rr.Service) + 1
l += len(rr.Regexp) + 1 l += len(rr.Regexp) + 1
l += len(rr.Replacement) + 1 l += domainNameLen(rr.Replacement, off+l, compression, false)
return l return l
} }
func (rr *NID) len() int { func (rr *NID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Preference l += 2 // Preference
l += 8 // NodeID l += 8 // NodeID
return l return l
} }
func (rr *NIMLOC) len() int { func (rr *NIMLOC) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Locator)/2 + 1 l += len(rr.Locator)/2 + 1
return l return l
} }
func (rr *NINFO) len() int { func (rr *NINFO) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
for _, x := range rr.ZSData { for _, x := range rr.ZSData {
l += len(x) + 1 l += len(x) + 1
} }
return l return l
} }
func (rr *NS) len() int { func (rr *NS) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Ns) + 1 l += domainNameLen(rr.Ns, off+l, compression, true)
return l return l
} }
func (rr *NSAPPTR) len() int { func (rr *NSAPPTR) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Ptr) + 1 l += domainNameLen(rr.Ptr, off+l, compression, false)
return l return l
} }
func (rr *NSEC3PARAM) len() int { func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l++ // Hash l++ // Hash
l++ // Flags l++ // Flags
l += 2 // Iterations l += 2 // Iterations
@ -473,44 +481,49 @@ func (rr *NSEC3PARAM) len() int {
l += len(rr.Salt) / 2 l += len(rr.Salt) / 2
return l return l
} }
func (rr *OPENPGPKEY) len() int { func (rr *NULL) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Data)
return l
}
func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len(off, compression)
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l return l
} }
func (rr *PTR) len() int { func (rr *PTR) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Ptr) + 1 l += domainNameLen(rr.Ptr, off+l, compression, true)
return l return l
} }
func (rr *PX) len() int { func (rr *PX) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Preference l += 2 // Preference
l += len(rr.Map822) + 1 l += domainNameLen(rr.Map822, off+l, compression, false)
l += len(rr.Mapx400) + 1 l += domainNameLen(rr.Mapx400, off+l, compression, false)
return l return l
} }
func (rr *RFC3597) len() int { func (rr *RFC3597) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Rdata)/2 + 1 l += len(rr.Rdata)/2 + 1
return l return l
} }
func (rr *RKEY) len() int { func (rr *RKEY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Flags l += 2 // Flags
l++ // Protocol l++ // Protocol
l++ // Algorithm l++ // Algorithm
l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) l += base64.StdEncoding.DecodedLen(len(rr.PublicKey))
return l return l
} }
func (rr *RP) len() int { func (rr *RP) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Mbox) + 1 l += domainNameLen(rr.Mbox, off+l, compression, false)
l += len(rr.Txt) + 1 l += domainNameLen(rr.Txt, off+l, compression, false)
return l return l
} }
func (rr *RRSIG) len() int { func (rr *RRSIG) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // TypeCovered l += 2 // TypeCovered
l++ // Algorithm l++ // Algorithm
l++ // Labels l++ // Labels
@ -518,28 +531,28 @@ func (rr *RRSIG) len() int {
l += 4 // Expiration l += 4 // Expiration
l += 4 // Inception l += 4 // Inception
l += 2 // KeyTag l += 2 // KeyTag
l += len(rr.SignerName) + 1 l += domainNameLen(rr.SignerName, off+l, compression, false)
l += base64.StdEncoding.DecodedLen(len(rr.Signature)) l += base64.StdEncoding.DecodedLen(len(rr.Signature))
return l return l
} }
func (rr *RT) len() int { func (rr *RT) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Preference l += 2 // Preference
l += len(rr.Host) + 1 l += domainNameLen(rr.Host, off+l, compression, false)
return l return l
} }
func (rr *SMIMEA) len() int { func (rr *SMIMEA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l++ // Usage l++ // Usage
l++ // Selector l++ // Selector
l++ // MatchingType l++ // MatchingType
l += len(rr.Certificate)/2 + 1 l += len(rr.Certificate)/2 + 1
return l return l
} }
func (rr *SOA) len() int { func (rr *SOA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Ns) + 1 l += domainNameLen(rr.Ns, off+l, compression, true)
l += len(rr.Mbox) + 1 l += domainNameLen(rr.Mbox, off+l, compression, true)
l += 4 // Serial l += 4 // Serial
l += 4 // Refresh l += 4 // Refresh
l += 4 // Retry l += 4 // Retry
@ -547,45 +560,45 @@ func (rr *SOA) len() int {
l += 4 // Minttl l += 4 // Minttl
return l return l
} }
func (rr *SPF) len() int { func (rr *SPF) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
for _, x := range rr.Txt { for _, x := range rr.Txt {
l += len(x) + 1 l += len(x) + 1
} }
return l return l
} }
func (rr *SRV) len() int { func (rr *SRV) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Priority l += 2 // Priority
l += 2 // Weight l += 2 // Weight
l += 2 // Port l += 2 // Port
l += len(rr.Target) + 1 l += domainNameLen(rr.Target, off+l, compression, false)
return l return l
} }
func (rr *SSHFP) len() int { func (rr *SSHFP) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l++ // Algorithm l++ // Algorithm
l++ // Type l++ // Type
l += len(rr.FingerPrint)/2 + 1 l += len(rr.FingerPrint)/2 + 1
return l return l
} }
func (rr *TA) len() int { func (rr *TA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // KeyTag l += 2 // KeyTag
l++ // Algorithm l++ // Algorithm
l++ // DigestType l++ // DigestType
l += len(rr.Digest)/2 + 1 l += len(rr.Digest)/2 + 1
return l return l
} }
func (rr *TALINK) len() int { func (rr *TALINK) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.PreviousName) + 1 l += domainNameLen(rr.PreviousName, off+l, compression, false)
l += len(rr.NextName) + 1 l += domainNameLen(rr.NextName, off+l, compression, false)
return l return l
} }
func (rr *TKEY) len() int { func (rr *TKEY) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Algorithm) + 1 l += domainNameLen(rr.Algorithm, off+l, compression, false)
l += 4 // Inception l += 4 // Inception
l += 4 // Expiration l += 4 // Expiration
l += 2 // Mode l += 2 // Mode
@ -596,17 +609,17 @@ func (rr *TKEY) len() int {
l += len(rr.OtherData) / 2 l += len(rr.OtherData) / 2
return l return l
} }
func (rr *TLSA) len() int { func (rr *TLSA) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l++ // Usage l++ // Usage
l++ // Selector l++ // Selector
l++ // MatchingType l++ // MatchingType
l += len(rr.Certificate)/2 + 1 l += len(rr.Certificate)/2 + 1
return l return l
} }
func (rr *TSIG) len() int { func (rr *TSIG) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Algorithm) + 1 l += domainNameLen(rr.Algorithm, off+l, compression, false)
l += 6 // TimeSigned l += 6 // TimeSigned
l += 2 // Fudge l += 2 // Fudge
l += 2 // MACSize l += 2 // MACSize
@ -617,32 +630,32 @@ func (rr *TSIG) len() int {
l += len(rr.OtherData) / 2 l += len(rr.OtherData) / 2
return l return l
} }
func (rr *TXT) len() int { func (rr *TXT) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
for _, x := range rr.Txt { for _, x := range rr.Txt {
l += len(x) + 1 l += len(x) + 1
} }
return l return l
} }
func (rr *UID) len() int { func (rr *UID) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 4 // Uid l += 4 // Uid
return l return l
} }
func (rr *UINFO) len() int { func (rr *UINFO) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.Uinfo) + 1 l += len(rr.Uinfo) + 1
return l return l
} }
func (rr *URI) len() int { func (rr *URI) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += 2 // Priority l += 2 // Priority
l += 2 // Weight l += 2 // Weight
l += len(rr.Target) l += len(rr.Target)
return l return l
} }
func (rr *X25) len() int { func (rr *X25) len(off int, compression map[string]struct{}) int {
l := rr.Hdr.len() l := rr.Hdr.len(off, compression)
l += len(rr.PSDNAddress) + 1 l += len(rr.PSDNAddress) + 1
return l return l
} }
@ -783,12 +796,17 @@ func (rr *NSEC3) copy() RR {
func (rr *NSEC3PARAM) copy() RR { func (rr *NSEC3PARAM) copy() RR {
return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt}
} }
func (rr *NULL) copy() RR {
return &NULL{rr.Hdr, rr.Data}
}
func (rr *OPENPGPKEY) copy() RR { func (rr *OPENPGPKEY) copy() RR {
return &OPENPGPKEY{rr.Hdr, rr.PublicKey} return &OPENPGPKEY{rr.Hdr, rr.PublicKey}
} }
func (rr *OPT) copy() RR { func (rr *OPT) copy() RR {
Option := make([]EDNS0, len(rr.Option)) Option := make([]EDNS0, len(rr.Option))
copy(Option, rr.Option) for i, e := range rr.Option {
Option[i] = e.copy()
}
return &OPT{rr.Hdr, Option} return &OPT{rr.Hdr, Option}
} }
func (rr *PTR) copy() RR { func (rr *PTR) copy() RR {
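Note: the generated copy for OPT now deep-copies each EDNS0 option instead of sharing the slice. A sketch of the observable difference through the exported dns.Copy; the cookie value is a placeholder:

// Sketch: mutating the copy's EDNS0 cookie no longer touches the original.
package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	o := new(dns.OPT)
	o.Hdr.Name = "."
	o.Hdr.Rrtype = dns.TypeOPT
	o.Option = append(o.Option, &dns.EDNS0_COOKIE{Code: dns.EDNS0COOKIE, Cookie: "24a5ac0d1e2f3a4b"})

	dup := dns.Copy(o).(*dns.OPT)
	dup.Option[0].(*dns.EDNS0_COOKIE).Cookie = "0000000000000000"

	fmt.Println(o.Option[0].(*dns.EDNS0_COOKIE).Cookie)   // original unchanged
	fmt.Println(dup.Option[0].(*dns.EDNS0_COOKIE).Cookie) // copy modified
}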


@ -18,7 +18,7 @@ type ServiceError interface {
GetMessage() string GetMessage() string
// A short error code that defines the error, meant for programmatic parsing. // A short error code that defines the error, meant for programmatic parsing.
// See https://docs.us-phoenix-1.oraclecloud.com/Content/API/References/apierrors.htm // See https://docs.cloud.oracle.com/Content/API/References/apierrors.htm
GetCode() string GetCode() string
// Unique Oracle-assigned identifier for the request. // Unique Oracle-assigned identifier for the request.
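Note: a sketch of extracting the structured error this interface describes; common.IsServiceError is assumed from the SDK's common package and the error here is a stand-in:

// Sketch: inspecting an OCI ServiceError returned by an SDK call.
package main

import (
	"fmt"

	"github.com/oracle/oci-go-sdk/common"
)

func describe(err error) {
	if serviceErr, ok := common.IsServiceError(err); ok {
		fmt.Println("status:", serviceErr.GetHTTPStatusCode())
		fmt.Println("code:  ", serviceErr.GetCode())
		fmt.Println("msg:   ", serviceErr.GetMessage())
		return
	}
	fmt.Println("not a service error:", err)
}

func main() {
	describe(fmt.Errorf("plain error"))
}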


@ -263,6 +263,11 @@ func addToBody(request *http.Request, value reflect.Value, field reflect.StructF
if e != nil { if e != nil {
return return
} }
if defaultLogger.LogLevel() == verboseLogging {
Debugf("Marshaled body is: %s\n", string(marshaled))
}
bodyBytes := bytes.NewReader(marshaled) bodyBytes := bytes.NewReader(marshaled)
request.ContentLength = int64(bodyBytes.Len()) request.ContentLength = int64(bodyBytes.Len())
request.Header.Set(requestHeaderContentLength, strconv.FormatInt(request.ContentLength, 10)) request.Header.Set(requestHeaderContentLength, strconv.FormatInt(request.ContentLength, 10))


@ -10,8 +10,8 @@ import (
) )
const ( const (
major = "4" major = "5"
minor = "0" minor = "4"
patch = "0" patch = "0"
tag = "" tag = ""
) )
@ -26,7 +26,7 @@ func Version() string {
verBuilder := bytes.NewBufferString(ver) verBuilder := bytes.NewBufferString(ver)
if tag != "" && tag != "-" { if tag != "" && tag != "-" {
_, err := verBuilder.WriteString(tag) _, err := verBuilder.WriteString(tag)
if err == nil { if err != nil {
verBuilder = bytes.NewBufferString(ver) verBuilder = bytes.NewBufferString(ver)
} }
} }
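Note: the constants above feed the SDK's reported version string (the hunk header shows the exported Version function). A trivial sketch:

// Sketch: printing the vendored SDK's version string.
package main

import (
	"fmt"

	"github.com/oracle/oci-go-sdk/common"
)

func main() {
	fmt.Println(common.Version()) // e.g. "5.4.0" for the vendored revision
}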


@ -4,7 +4,7 @@
// DNS API // DNS API
// //
// API for the DNS service. Use this API to manage DNS zones, records, and other DNS resources. // API for the DNS service. Use this API to manage DNS zones, records, and other DNS resources.
// For more information, see Overview of the DNS Service (https://docs.us-phoenix-1.oraclecloud.com/iaas/Content/DNS/Concepts/dnszonemanagement.htm). // For more information, see Overview of the DNS Service (https://docs.cloud.oracle.com/iaas/Content/DNS/Concepts/dnszonemanagement.htm).
// //
package dns package dns
@ -14,12 +14,8 @@ import (
) )
// CreateSteeringPolicyAttachmentDetails The body for defining an attachment between a steering policy and a domain. // CreateSteeringPolicyAttachmentDetails The body for defining an attachment between a steering policy and a domain.
// An attachment occludes all records at its domain that are of a covered rtype, constructing //
// DNS responses from its steering policy rather than from those domain records. // **Warning:** Oracle recommends that you avoid using any confidential information when you supply string values using the API.
// The attachment will cover every rtype that matches the rtype of an answer in its policy, and
// will cover all address rtypes (e.g., A and AAAA) if the policy includes at least one CNAME
// answer.
// A domain can have at most one attachment covering any given rtype.
type CreateSteeringPolicyAttachmentDetails struct { type CreateSteeringPolicyAttachmentDetails struct {
// The OCID of the attached steering policy. // The OCID of the attached steering policy.
@ -32,7 +28,7 @@ type CreateSteeringPolicyAttachmentDetails struct {
DomainName *string `mandatory:"true" json:"domainName"` DomainName *string `mandatory:"true" json:"domainName"`
// A user-friendly name for the steering policy attachment. // A user-friendly name for the steering policy attachment.
// Does not have to be unique, and it's changeable. // Does not have to be unique and can be changed.
// Avoid entering confidential information. // Avoid entering confidential information.
DisplayName *string `mandatory:"false" json:"displayName"` DisplayName *string `mandatory:"false" json:"displayName"`
} }


@ -4,7 +4,7 @@
// DNS API // DNS API
// //
// API for the DNS service. Use this API to manage DNS zones, records, and other DNS resources. // API for the DNS service. Use this API to manage DNS zones, records, and other DNS resources.
// For more information, see Overview of the DNS Service (https://docs.us-phoenix-1.oraclecloud.com/iaas/Content/DNS/Concepts/dnszonemanagement.htm). // For more information, see Overview of the DNS Service (https://docs.cloud.oracle.com/iaas/Content/DNS/Concepts/dnszonemanagement.htm).
// //
package dns package dns
@ -15,82 +15,81 @@ import (
) )
// CreateSteeringPolicyDetails The body for defining a new steering policy. // CreateSteeringPolicyDetails The body for defining a new steering policy.
// *Warning:* Oracle recommends that you avoid using any confidential information when you supply string values using the API. // **Warning:** Oracle recommends that you avoid using any confidential information when you supply string values using the API.
type CreateSteeringPolicyDetails struct { type CreateSteeringPolicyDetails struct {
// The OCID of the compartment containing the steering policy. // The OCID of the compartment containing the steering policy.
CompartmentId *string `mandatory:"true" json:"compartmentId"` CompartmentId *string `mandatory:"true" json:"compartmentId"`
// A user-friendly name for the steering policy. // A user-friendly name for the steering policy. Does not have to be unique and can be changed.
// Does not have to be unique, and it's changeable.
// Avoid entering confidential information. // Avoid entering confidential information.
DisplayName *string `mandatory:"true" json:"displayName"` DisplayName *string `mandatory:"true" json:"displayName"`
// The common pattern (or lack thereof) to which the steering policy adheres. This // A set of predefined rules based on the desired purpose of the steering policy. Each
// value restricts the possible configurations of rules, but thereby supports // template utilizes Traffic Management's rules in a different order to produce the desired
// specifically tailored interfaces. Values other than "CUSTOM" require the rules to // results when answering DNS queries.
// begin with an unconditional FILTER that keeps answers contingent upon //
// `answer.isDisabled != true`, followed // **Example:** The `FAILOVER` template determines answers by filtering the policy's answers
// _if and only if the policy references a health check monitor_ by an unconditional // using the `FILTER` rule first, then the following rules in succession: `HEALTH`, `PRIORITY`,
// HEALTH rule, and require the last rule to be an unconditional LIMIT. // and `LIMIT`. This gives the domain dynamic failover capability.
// What must precede the LIMIT rule is determined by the template value: //
// - FAILOVER requires exactly an unconditional PRIORITY rule that ranks answers by pool. // It is **strongly recommended** to use a template other than `CUSTOM` when creating
// Each answer pool must have a unique priority value assigned to it. Answer data must // a steering policy.
// be defined in the `defaultAnswerData` property for the rule and the `cases` property //
// must not be defined. // All templates require the rule order to begin with an unconditional `FILTER` rule that keeps
// - LOAD_BALANCE requires exactly an unconditional WEIGHTED rule that shuffles answers // answers contingent upon `answer.isDisabled != true`, except for `CUSTOM`. A defined
// by name. Answer data must be defined in the `defaultAnswerData` property for the // `HEALTH` rule must follow the `FILTER` rule if the policy references a `healthCheckMonitorId`.
// rule and the `cases` property must not be defined. // The last rule of a template must must be a `LIMIT` rule. For more information about templates
// - ROUTE_BY_GEO requires exactly one PRIORITY rule that ranks answers by pool using the // and code examples, see Traffic Management API Guide (https://docs.cloud.oracle.com/iaas/Content/TrafficManagement/Concepts/trafficmanagementapi.htm).
// geographical location of the client as a condition. Within that rule you may only // **Template Types**
// use `query.client.geoKey` in the `caseCondition` expressions for defining the cases. // * `FAILOVER` - Uses health check information on your endpoints to determine which DNS answers
// For each case in the PRIORITY rule each answer pool must have a unique priority // to serve. If an endpoint fails a health check, the answer for that endpoint will be removed
// value assigned to it. Answer data can only be defined within cases and // from the list of available answers until the endpoint is detected as healthy.
// `defaultAnswerData` cannot be used in the PRIORITY rule. //
// - ROUTE_BY_ASN requires exactly one PRIORITY rule that ranks answers by pool using the // * `LOAD_BALANCE` - Distributes web traffic to specified endpoints based on defined weights.
// ASN of the client as a condition. Within that rule you may only use //
// `query.client.asn` in the `caseCondition` expressions for defining the cases. // * `ROUTE_BY_GEO` - Answers DNS queries based on the query's geographic location. For a list of geographic
// For each case in the PRIORITY rule each answer pool must have a unique priority // locations to route by, see Traffic Management Geographic Locations (https://docs.cloud.oracle.com/iaas/Content/TrafficManagement/Reference/trafficmanagementgeo.htm).
// value assigned to it. Answer data can only be defined within cases and //
// `defaultAnswerData` cannot be used in the PRIORITY rule. // * `ROUTE_BY_ASN` - Answers DNS queries based on the query's originating ASN.
// - ROUTE_BY_IP requires exactly one PRIORITY rule that ranks answers by pool using the //
// IP subnet of the client as a condition. Within that rule you may only use // * `ROUTE_BY_IP` - Answers DNS queries based on the query's IP address.
// `query.client.address` in the `caseCondition` expressions for defining the cases. //
// For each case in the PRIORITY rule each answer pool must have a unique priority // * `CUSTOM` - Allows a customized configuration of rules.
// value assigned to it. Answer data can only be defined within cases and
// `defaultAnswerData` cannot be used in the PRIORITY rule.
// - CUSTOM allows an arbitrary configuration of rules.
// For an existing steering policy, the template value may be changed to any of the
// supported options but the resulting policy must conform to the requirements for the
// new template type or else a Bad Request error will be returned.
Template CreateSteeringPolicyDetailsTemplateEnum `mandatory:"true" json:"template"` Template CreateSteeringPolicyDetailsTemplateEnum `mandatory:"true" json:"template"`
// The Time To Live for responses from the steering policy, in seconds. // The Time To Live (TTL) for responses from the steering policy, in seconds.
// If not specified during creation, a value of 30 seconds will be used. // If not specified during creation, a value of 30 seconds will be used.
Ttl *int `mandatory:"false" json:"ttl"` Ttl *int `mandatory:"false" json:"ttl"`
// The OCID of the health check monitor providing health data about the answers of the // The OCID of the health check monitor providing health data about the answers of the
// steering policy. // steering policy. A steering policy answer with `rdata` matching a monitored endpoint
// A steering policy answer with `rdata` matching a monitored endpoint will use the health // will use the health data of that endpoint. A steering policy answer with `rdata` not
// data of that endpoint. // matching any monitored endpoint will be assumed healthy.
// A steering policy answer with `rdata` not matching any monitored endpoint will be assumed //
// healthy. // **Note:** To use the Health Check monitoring feature in a steering policy, a monitor
// must be created using the Health Checks service first. For more information on how to
// create a monitor, please see Managing Health Checks (https://docs.cloud.oracle.com/iaas/Content/HealthChecks/Tasks/managinghealthchecks.htm).
HealthCheckMonitorId *string `mandatory:"false" json:"healthCheckMonitorId"` HealthCheckMonitorId *string `mandatory:"false" json:"healthCheckMonitorId"`
// Simple key-value pair that is applied without any predefined name, type, or scope. // Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
// For more information, see Resource Tags (https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm). // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
// Example: `{"bar-key": "value"}` //
// **Example:** `{"Department": "Finance"}`
FreeformTags map[string]string `mandatory:"false" json:"freeformTags"` FreeformTags map[string]string `mandatory:"false" json:"freeformTags"`
// Usage of predefined tag keys. These predefined keys are scoped to a namespace. // Defined tags for this resource. Each key is predefined and scoped to a namespace.
// Example: `{"foo-namespace": {"bar-key": "value"}}` // For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
//
// **Example:** `{"Operations": {"CostCenter": "42"}}`
DefinedTags map[string]map[string]interface{} `mandatory:"false" json:"definedTags"` DefinedTags map[string]map[string]interface{} `mandatory:"false" json:"definedTags"`
// The set of all answers that can potentially issue from the steering policy. // The set of all answers that can potentially issue from the steering policy.
Answers []SteeringPolicyAnswer `mandatory:"false" json:"answers"` Answers []SteeringPolicyAnswer `mandatory:"false" json:"answers"`
// The pipeline of rules that will be processed in sequence to reduce the pool of answers // The series of rules that will be processed in sequence to reduce the pool of answers
// to a response for any given request. // to a response for any given request.
//
// The first rule receives a shuffled list of all answers, and every other rule receives // The first rule receives a shuffled list of all answers, and every other rule receives
// the list of answers emitted by the one preceding it. The last rule populates the // the list of answers emitted by the one preceding it. The last rule populates the
// response. // response.
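Note: a sketch of a FAILOVER policy built from the documented fields, with Answers and Rules omitted for brevity. The enum constant (CreateSteeringPolicyDetailsTemplateFailover) and the common.String/common.Int helpers are assumed from the SDK's naming conventions; all OCIDs are placeholders:

// Sketch: constructing CreateSteeringPolicyDetails for a FAILOVER template.
package main

import (
	"fmt"

	"github.com/oracle/oci-go-sdk/common"
	"github.com/oracle/oci-go-sdk/dns"
)

func main() {
	details := dns.CreateSteeringPolicyDetails{
		CompartmentId:        common.String("ocid1.compartment.oc1..example"),
		DisplayName:          common.String("web-failover"),
		Template:             dns.CreateSteeringPolicyDetailsTemplateFailover, // assumed enum name
		Ttl:                  common.Int(30),
		HealthCheckMonitorId: common.String("ocid1.httpmonitor.oc1..example"),
		FreeformTags:         map[string]string{"Department": "Finance"},
	}
	fmt.Printf("%+v\n", details)
}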


@@ -4,7 +4,7 @@
 // DNS API
 //
 // API for the DNS service. Use this API to manage DNS zones, records, and other DNS resources.
-// For more information, see Overview of the DNS Service (https://docs.us-phoenix-1.oraclecloud.com/iaas/Content/DNS/Concepts/dnszonemanagement.htm).
+// For more information, see Overview of the DNS Service (https://docs.cloud.oracle.com/iaas/Content/DNS/Concepts/dnszonemanagement.htm).
 //
 package dns
@@ -14,7 +14,7 @@ import (
 )
 // CreateZoneDetails The body for defining a new zone.
-// *Warning:* Oracle recommends that you avoid using any confidential information when you supply string values using the API.
+// **Warning:** Oracle recommends that you avoid using any confidential information when you supply string values using the API.
 type CreateZoneDetails struct {
 // The name of the zone.
@@ -26,13 +26,16 @@ type CreateZoneDetails struct {
 // The OCID of the compartment containing the zone.
 CompartmentId *string `mandatory:"true" json:"compartmentId"`
-// Simple key-value pair that is applied without any predefined name, type, or scope.
-// For more information, see Resource Tags (https://docs.us-phoenix-1.oraclecloud.com/Content/General/Concepts/resourcetags.htm).
-// Example: `{"bar-key": "value"}`
+// Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace.
+// For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
+//
+// **Example:** `{"Department": "Finance"}`
 FreeformTags map[string]string `mandatory:"false" json:"freeformTags"`
-// Usage of predefined tag keys. These predefined keys are scoped to a namespace.
-// Example: `{"foo-namespace": {"bar-key": "value"}}`
+// Defined tags for this resource. Each key is predefined and scoped to a namespace.
+// For more information, see Resource Tags (https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm).
+//
+// **Example:** `{"Operations": {"CostCenter": "42"}}`
 DefinedTags map[string]map[string]interface{} `mandatory:"false" json:"definedTags"`
 // External master servers for the zone. `externalMasters` becomes a
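Note: a minimal sketch of how a caller might fill in CreateZoneDetails with the fields shown in this hunk. The import path follows the vendored github.com/oracle/oci-go-sdk layout used by this repository, and the mandatory zone-name field (not visible in the hunk) is deliberately left out; treat both as assumptions.

package main

import (
	"fmt"

	"github.com/oracle/oci-go-sdk/dns"
)

func main() {
	compartmentID := "ocid1.compartment.oc1..example" // placeholder OCID

	details := dns.CreateZoneDetails{
		CompartmentId: &compartmentID,
		// Free-form tags: simple key/value pairs.
		FreeformTags: map[string]string{"Department": "Finance"},
		// Defined tags: namespace -> key -> value.
		DefinedTags: map[string]map[string]interface{}{
			"Operations": {"CostCenter": "42"},
		},
	}

	fmt.Printf("%+v\n", details)
}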


@@ -4,7 +4,7 @@
 // DNS API
 //
 // API for the DNS service. Use this API to manage DNS zones, records, and other DNS resources.
-// For more information, see Overview of the DNS Service (https://docs.us-phoenix-1.oraclecloud.com/iaas/Content/DNS/Concepts/dnszonemanagement.htm).
+// For more information, see Overview of the DNS Service (https://docs.cloud.oracle.com/iaas/Content/DNS/Concepts/dnszonemanagement.htm).
 //
 package dns
@@ -59,7 +59,8 @@ func (client *DnsClient) ConfigurationProvider() *common.ConfigurationProvider {
 return client.config
 }
-// CreateSteeringPolicy Creates a new steering policy in the specified compartment.
+// CreateSteeringPolicy Creates a new steering policy in the specified compartment. For more information on
+// creating policies with templates, see Traffic Management API Guide (https://docs.cloud.oracle.com/iaas/Content/TrafficManagement/Concepts/trafficmanagementapi.htm).
 func (client DnsClient) CreateSteeringPolicy(ctx context.Context, request CreateSteeringPolicyRequest) (response CreateSteeringPolicyResponse, err error) {
 var ociResponse common.OCIResponse
 policy := common.NoRetryPolicy()
@@ -106,9 +107,11 @@ func (client DnsClient) createSteeringPolicy(ctx context.Context, request common
 return response, err
 }
-// CreateSteeringPolicyAttachment Creates a new attachment between a steering policy and a domain.
+// CreateSteeringPolicyAttachment Creates a new attachment between a steering policy and a domain, giving the
+// policy permission to answer queries for the specified domain. A steering policy must
+// be attached to a domain for the policy to answer DNS queries for that domain.
 // For the purposes of access control, the attachment is automatically placed
-// into the same compartment as the containing zone of the domain.
+// into the same compartment as the domain's zone.
 func (client DnsClient) CreateSteeringPolicyAttachment(ctx context.Context, request CreateSteeringPolicyAttachmentRequest) (response CreateSteeringPolicyAttachmentResponse, err error) {
 var ociResponse common.OCIResponse
 policy := common.NoRetryPolicy()
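Note: the two methods above are meant to be used together, since a policy only answers queries for a domain once it is attached to that domain. A rough sketch of that workflow; the client constructor and config provider follow the SDK's usual pattern, and the request payloads are left empty because their fields are not part of this hunk (all of that is an assumption, not taken from this diff):

package main

import (
	"context"
	"log"

	"github.com/oracle/oci-go-sdk/common"
	"github.com/oracle/oci-go-sdk/dns"
)

func main() {
	ctx := context.Background()

	// Assumed constructor: the SDK's standard NewDnsClientWithConfigurationProvider helper.
	client, err := dns.NewDnsClientWithConfigurationProvider(common.DefaultConfigProvider())
	if err != nil {
		log.Fatal(err)
	}

	// 1. Create the steering policy (details elided; see the Traffic Management API Guide linked above).
	if _, err := client.CreateSteeringPolicy(ctx, dns.CreateSteeringPolicyRequest{}); err != nil {
		log.Fatal(err)
	}

	// 2. Attach the policy to a domain so it may answer DNS queries for that domain.
	if _, err := client.CreateSteeringPolicyAttachment(ctx, dns.CreateSteeringPolicyAttachmentRequest{}); err != nil {
		log.Fatal(err)
	}
}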
@@ -285,7 +288,8 @@ func (client DnsClient) deleteRRSet(ctx context.Context, request common.OCIReque
 // DeleteSteeringPolicy Deletes the specified steering policy.
 // A `204` response indicates that the delete has been successful.
-// Deletion will fail if the policy is attached to any zones.
+// Deletion will fail if the policy is attached to any zones. To detach a
+// policy from a zone, see `DeleteSteeringPolicyAttachment`.
 func (client DnsClient) DeleteSteeringPolicy(ctx context.Context, request DeleteSteeringPolicyRequest) (response DeleteSteeringPolicyResponse, err error) {
 var ociResponse common.OCIResponse
 policy := common.NoRetryPolicy()
@@ -798,7 +802,10 @@ func (client DnsClient) listZones(ctx context.Context, request common.OCIRequest
 return response, err
 }
-// PatchDomainRecords Updates records in the specified zone at a domain. You can update one record or all records for the specified zone depending on the changes provided in the request body. You can also add or remove records using this function.
+// PatchDomainRecords Updates records in the specified zone at a domain. You can update
+// one record or all records for the specified zone depending on the changes
+// provided in the request body. You can also add or remove records using this
+// function.
 func (client DnsClient) PatchDomainRecords(ctx context.Context, request PatchDomainRecordsRequest) (response PatchDomainRecordsResponse, err error) {
 var ociResponse common.OCIResponse
 policy := common.NoRetryPolicy()
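Note: a sketch of the call pattern shared by all of these wrappers, using PatchDomainRecords as the example; the request body (the list of add/update/remove operations described above) is elided because its fields do not appear in this hunk.

package dnsexample

import (
	"context"
	"fmt"

	"github.com/oracle/oci-go-sdk/dns"
)

// patchDomainRecords shows the common shape of the generated methods:
// a context plus a typed request in, a typed response plus an error out.
func patchDomainRecords(ctx context.Context, client dns.DnsClient, req dns.PatchDomainRecordsRequest) (dns.PatchDomainRecordsResponse, error) {
	resp, err := client.PatchDomainRecords(ctx, req)
	if err != nil {
		return resp, fmt.Errorf("patching domain records: %v", err)
	}
	return resp, nil
}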
@@ -1016,7 +1023,7 @@ func (client DnsClient) updateRRSet(ctx context.Context, request common.OCIReque
 return response, err
 }
-// UpdateSteeringPolicy Updates the specified steering policy with your new information.
+// UpdateSteeringPolicy Updates the configuration of the specified steering policy.
 func (client DnsClient) UpdateSteeringPolicy(ctx context.Context, request UpdateSteeringPolicyRequest) (response UpdateSteeringPolicyResponse, err error) {
 var ociResponse common.OCIResponse
 policy := common.NoRetryPolicy()
@@ -1102,7 +1109,7 @@ func (client DnsClient) updateSteeringPolicyAttachment(ctx context.Context, requ
 // UpdateZone Updates the specified secondary zone with your new external master
 // server information. For more information about secondary zone, see
-// Manage DNS Service Zone (https://docs.us-phoenix-1.oraclecloud.com/iaas/Content/DNS/Tasks/managingdnszones.htm).
+// Manage DNS Service Zone (https://docs.cloud.oracle.com/iaas/Content/DNS/Tasks/managingdnszones.htm).
 func (client DnsClient) UpdateZone(ctx context.Context, request UpdateZoneRequest) (response UpdateZoneResponse, err error) {
 var ociResponse common.OCIResponse
 policy := common.NoRetryPolicy()


@@ -4,7 +4,7 @@
 // DNS API
 //
 // API for the DNS service. Use this API to manage DNS zones, records, and other DNS resources.
-// For more information, see Overview of the DNS Service (https://docs.us-phoenix-1.oraclecloud.com/iaas/Content/DNS/Concepts/dnszonemanagement.htm).
+// For more information, see Overview of the DNS Service (https://docs.cloud.oracle.com/iaas/Content/DNS/Concepts/dnszonemanagement.htm).
 //
 package dns


@@ -85,7 +85,7 @@ type GetDomainRecordsResponse struct {
 // For list pagination. When this header appears in the response, additional pages
 // of results remain. For important details about how pagination works,
-// see List Pagination (https://docs.us-phoenix-1.oraclecloud.com/iaas/Content/API/Concepts/usingapi.htm#nine).
+// see List Pagination (https://docs.cloud.oracle.com/iaas/Content/API/Concepts/usingapi.htm#nine).
 OpcNextPage *string `presentIn:"header" name:"opc-next-page"`
 // The total number of items that match the query.
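Note: the opc-next-page header above drives list pagination. A sketch of the resulting loop; it assumes the matching GetDomainRecords method, its GetDomainRecordsRequest type, and a Page *string field on that request, which follow the SDK's usual list-pagination pattern but are not shown in this hunk.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/oracle/oci-go-sdk/common"
	"github.com/oracle/oci-go-sdk/dns"
)

func main() {
	ctx := context.Background()

	client, err := dns.NewDnsClientWithConfigurationProvider(common.DefaultConfigProvider()) // assumed constructor
	if err != nil {
		log.Fatal(err)
	}

	req := dns.GetDomainRecordsRequest{} // zone and domain identifiers elided
	pages := 0
	for {
		resp, err := client.GetDomainRecords(ctx, req)
		if err != nil {
			log.Fatal(err)
		}
		pages++

		// No opc-next-page header means there are no further pages.
		if resp.OpcNextPage == nil {
			break
		}
		// Feed the token back as the next request's page (assumed Page field).
		req.Page = resp.OpcNextPage
	}
	fmt.Println("pages fetched:", pages)
}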

Some files were not shown because too many files have changed in this diff.