reconcile merge

Signed-off-by: Huamin Chen <hchen@redhat.com>
Author: Huamin Chen
Date: 2019-01-15 16:20:41 +00:00
parent 85b8415024
commit e46099a504
2425 changed files with 271763 additions and 40453 deletions


@@ -0,0 +1,407 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"errors"
"fmt"
"reflect"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/appengine"
"google.golang.org/appengine/internal"
pb "google.golang.org/appengine/internal/datastore"
)
var (
// ErrInvalidEntityType is returned when functions like Get or Next are
// passed a dst or src argument of invalid type.
ErrInvalidEntityType = errors.New("datastore: invalid entity type")
// ErrInvalidKey is returned when an invalid key is presented.
ErrInvalidKey = errors.New("datastore: invalid key")
// ErrNoSuchEntity is returned when no entity was found for a given key.
ErrNoSuchEntity = errors.New("datastore: no such entity")
)
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct.
// StructType is the type of the struct pointed to by the destination argument
// passed to Get or to Iterator.Next.
type ErrFieldMismatch struct {
StructType reflect.Type
FieldName string
Reason string
}
func (e *ErrFieldMismatch) Error() string {
return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
e.FieldName, e.StructType, e.Reason)
}
// protoToKey converts a Reference proto to a *Key. If the key is invalid,
// protoToKey will return the invalid key along with ErrInvalidKey.
func protoToKey(r *pb.Reference) (k *Key, err error) {
appID := r.GetApp()
namespace := r.GetNameSpace()
for _, e := range r.Path.Element {
k = &Key{
kind: e.GetType(),
stringID: e.GetName(),
intID: e.GetId(),
parent: k,
appID: appID,
namespace: namespace,
}
if !k.valid() {
return k, ErrInvalidKey
}
}
return
}
// keyToProto converts a *Key to a Reference proto.
func keyToProto(defaultAppID string, k *Key) *pb.Reference {
appID := k.appID
if appID == "" {
appID = defaultAppID
}
n := 0
for i := k; i != nil; i = i.parent {
n++
}
e := make([]*pb.Path_Element, n)
for i := k; i != nil; i = i.parent {
n--
e[n] = &pb.Path_Element{
Type: &i.kind,
}
// At most one of {Name,Id} should be set.
// Neither will be set for incomplete keys.
if i.stringID != "" {
e[n].Name = &i.stringID
} else if i.intID != 0 {
e[n].Id = &i.intID
}
}
var namespace *string
if k.namespace != "" {
namespace = proto.String(k.namespace)
}
return &pb.Reference{
App: proto.String(appID),
NameSpace: namespace,
Path: &pb.Path{
Element: e,
},
}
}
// multiKeyToProto is a batch version of keyToProto.
func multiKeyToProto(appID string, key []*Key) []*pb.Reference {
ret := make([]*pb.Reference, len(key))
for i, k := range key {
ret[i] = keyToProto(appID, k)
}
return ret
}
// multiValid is a batch version of Key.valid. It returns an error, not a
// []bool.
func multiValid(key []*Key) error {
invalid := false
for _, k := range key {
if !k.valid() {
invalid = true
break
}
}
if !invalid {
return nil
}
err := make(appengine.MultiError, len(key))
for i, k := range key {
if !k.valid() {
err[i] = ErrInvalidKey
}
}
return err
}
// It's unfortunate that the two semantically equivalent concepts pb.Reference
// and pb.PropertyValue_ReferenceValue aren't the same type. For example, the
// two have different protobuf field numbers.
// referenceValueToKey is the same as protoToKey except the input is a
// PropertyValue_ReferenceValue instead of a Reference.
func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) {
appID := r.GetApp()
namespace := r.GetNameSpace()
for _, e := range r.Pathelement {
k = &Key{
kind: e.GetType(),
stringID: e.GetName(),
intID: e.GetId(),
parent: k,
appID: appID,
namespace: namespace,
}
if !k.valid() {
return nil, ErrInvalidKey
}
}
return
}
// keyToReferenceValue is the same as keyToProto except the output is a
// PropertyValue_ReferenceValue instead of a Reference.
func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue {
ref := keyToProto(defaultAppID, k)
pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element))
for i, e := range ref.Path.Element {
pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{
Type: e.Type,
Id: e.Id,
Name: e.Name,
}
}
return &pb.PropertyValue_ReferenceValue{
App: ref.App,
NameSpace: ref.NameSpace,
Pathelement: pe,
}
}
type multiArgType int
const (
multiArgTypeInvalid multiArgType = iota
multiArgTypePropertyLoadSaver
multiArgTypeStruct
multiArgTypeStructPtr
multiArgTypeInterface
)
// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
// type S, for some interface type I, or some non-interface non-pointer type P
// such that P or *P implements PropertyLoadSaver.
//
// It returns what category the slice's elements are, and the reflect.Type
// that represents S, I or P.
//
// As a special case, PropertyList is an invalid type for v.
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
if v.Kind() != reflect.Slice {
return multiArgTypeInvalid, nil
}
if v.Type() == typeOfPropertyList {
return multiArgTypeInvalid, nil
}
elemType = v.Type().Elem()
if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
return multiArgTypePropertyLoadSaver, elemType
}
switch elemType.Kind() {
case reflect.Struct:
return multiArgTypeStruct, elemType
case reflect.Interface:
return multiArgTypeInterface, elemType
case reflect.Ptr:
elemType = elemType.Elem()
if elemType.Kind() == reflect.Struct {
return multiArgTypeStructPtr, elemType
}
}
return multiArgTypeInvalid, nil
}
// Get loads the entity stored for k into dst, which must be a struct pointer
// or implement PropertyLoadSaver. If there is no such entity for the key, Get
// returns ErrNoSuchEntity.
//
// The values of dst's unmatched struct fields are not modified, and matching
// slice-typed fields are not reset before appending to them. In particular, it
// is recommended to pass a pointer to a zero valued struct on each Get call.
//
// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct. ErrFieldMismatch is only returned if
// dst is a struct pointer.
func Get(c context.Context, key *Key, dst interface{}) error {
if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here
return ErrInvalidEntityType
}
err := GetMulti(c, []*Key{key}, []interface{}{dst})
if me, ok := err.(appengine.MultiError); ok {
return me[0]
}
return err
}
// GetMulti is a batch version of Get.
//
// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
// type I, or some non-interface non-pointer type P such that P or *P
// implements PropertyLoadSaver. If an []I, each element must be a valid dst
// for Get: it must be a struct pointer or implement PropertyLoadSaver.
//
// As a special case, PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when []PropertyList was intended.
func GetMulti(c context.Context, key []*Key, dst interface{}) error {
v := reflect.ValueOf(dst)
multiArgType, _ := checkMultiArg(v)
if multiArgType == multiArgTypeInvalid {
return errors.New("datastore: dst has invalid type")
}
if len(key) != v.Len() {
return errors.New("datastore: key and dst slices have different length")
}
if len(key) == 0 {
return nil
}
if err := multiValid(key); err != nil {
return err
}
req := &pb.GetRequest{
Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
}
res := &pb.GetResponse{}
if err := internal.Call(c, "datastore_v3", "Get", req, res); err != nil {
return err
}
if len(key) != len(res.Entity) {
return errors.New("datastore: internal error: server returned the wrong number of entities")
}
multiErr, any := make(appengine.MultiError, len(key)), false
for i, e := range res.Entity {
if e.Entity == nil {
multiErr[i] = ErrNoSuchEntity
} else {
elem := v.Index(i)
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
elem.Set(reflect.New(elem.Type().Elem()))
}
multiErr[i] = loadEntity(elem.Interface(), e.Entity)
}
if multiErr[i] != nil {
any = true
}
}
if any {
return multiErr
}
return nil
}
// Put saves the entity src into the datastore with key k. src must be a struct
// pointer or implement PropertyLoadSaver; if a struct pointer then any
// unexported fields of that struct will be skipped. If k is an incomplete key,
// the returned key will be a unique key generated by the datastore.
func Put(c context.Context, key *Key, src interface{}) (*Key, error) {
k, err := PutMulti(c, []*Key{key}, []interface{}{src})
if err != nil {
if me, ok := err.(appengine.MultiError); ok {
return nil, me[0]
}
return nil, err
}
return k[0], nil
}
// PutMulti is a batch version of Put.
//
// src must satisfy the same conditions as the dst argument to GetMulti.
func PutMulti(c context.Context, key []*Key, src interface{}) ([]*Key, error) {
v := reflect.ValueOf(src)
multiArgType, _ := checkMultiArg(v)
if multiArgType == multiArgTypeInvalid {
return nil, errors.New("datastore: src has invalid type")
}
if len(key) != v.Len() {
return nil, errors.New("datastore: key and src slices have different length")
}
if len(key) == 0 {
return nil, nil
}
appID := internal.FullyQualifiedAppID(c)
if err := multiValid(key); err != nil {
return nil, err
}
req := &pb.PutRequest{}
for i := range key {
elem := v.Index(i)
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
sProto, err := saveEntity(appID, key[i], elem.Interface())
if err != nil {
return nil, err
}
req.Entity = append(req.Entity, sProto)
}
res := &pb.PutResponse{}
if err := internal.Call(c, "datastore_v3", "Put", req, res); err != nil {
return nil, err
}
if len(key) != len(res.Key) {
return nil, errors.New("datastore: internal error: server returned the wrong number of keys")
}
ret := make([]*Key, len(key))
for i := range ret {
var err error
ret[i], err = protoToKey(res.Key[i])
if err != nil || ret[i].Incomplete() {
return nil, errors.New("datastore: internal error: server returned an invalid key")
}
}
return ret, nil
}
// Delete deletes the entity for the given key.
func Delete(c context.Context, key *Key) error {
err := DeleteMulti(c, []*Key{key})
if me, ok := err.(appengine.MultiError); ok {
return me[0]
}
return err
}
// DeleteMulti is a batch version of Delete.
func DeleteMulti(c context.Context, key []*Key) error {
if len(key) == 0 {
return nil
}
if err := multiValid(key); err != nil {
return err
}
req := &pb.DeleteRequest{
Key: multiKeyToProto(internal.FullyQualifiedAppID(c), key),
}
res := &pb.DeleteResponse{}
return internal.Call(c, "datastore_v3", "Delete", req, res)
}
func namespaceMod(m proto.Message, namespace string) {
// pb.Query is the only type that has a name_space field.
// All other namespace support in datastore is in the keys.
switch m := m.(type) {
case *pb.Query:
if m.NameSpace == nil {
m.NameSpace = &namespace
}
}
}
func init() {
internal.NamespaceMods["datastore_v3"] = namespaceMod
internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name)
internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT))
}
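As an added, hedged sketch of how the exported surface above (NewKey, Put, Get, ErrNoSuchEntity, Delete) is typically driven from an App Engine handler; the guestbook package and Greeting type are assumptions for illustration only.

package guestbook // hypothetical package, for illustration only

import (
	"net/http"

	"google.golang.org/appengine"
	"google.golang.org/appengine/datastore"
)

// Greeting is an assumed entity type used only for this sketch.
type Greeting struct {
	Message string
}

func roundTrip(w http.ResponseWriter, r *http.Request) {
	ctx := appengine.NewContext(r)
	key := datastore.NewKey(ctx, "Greeting", "hello", 0, nil)

	// Put stores the entity under key; with an incomplete key the returned
	// key would carry the datastore-assigned IntID instead.
	if _, err := datastore.Put(ctx, key, &Greeting{Message: "hi"}); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}

	// Get loads it back; ErrNoSuchEntity distinguishes "absent" from failure.
	var g Greeting
	switch err := datastore.Get(ctx, key, &g); err {
	case nil, datastore.ErrNoSuchEntity:
		// proceed; g stays zero-valued if the entity was absent
	default:
		http.Error(w, err.Error(), 500)
		return
	}

	// Delete removes the entity stored under key, if any.
	if err := datastore.Delete(ctx, key); err != nil {
		http.Error(w, err.Error(), 500)
		return
	}
}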

File diff suppressed because it is too large.

vendor/google.golang.org/appengine/datastore/doc.go generated vendored Normal file

@@ -0,0 +1,361 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
/*
Package datastore provides a client for App Engine's datastore service.
Basic Operations
Entities are the unit of storage and are associated with a key. A key
consists of an optional parent key, a string application ID, a string kind
(also known as an entity type), and either a StringID or an IntID. A
StringID is also known as an entity name or key name.
It is valid to create a key with a zero StringID and a zero IntID; this is
called an incomplete key, and does not refer to any saved entity. Putting an
entity into the datastore under an incomplete key will cause a unique key
to be generated for that entity, with a non-zero IntID.
An entity's contents are a mapping from case-sensitive field names to values.
Valid value types are:
- signed integers (int, int8, int16, int32 and int64),
- bool,
- string,
- float32 and float64,
- []byte (up to 1 megabyte in length),
- any type whose underlying type is one of the above predeclared types,
- ByteString,
- *Key,
- time.Time (stored with microsecond precision),
- appengine.BlobKey,
- appengine.GeoPoint,
- structs whose fields are all valid value types,
- slices of any of the above.
Slices of structs are valid, as are structs that contain slices. However, if
one struct contains another, then at most one of those can be repeated. This
disqualifies recursively defined struct types: any struct T that (directly or
indirectly) contains a []T.
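For illustration (an added sketch; Post, Comment and Node are assumed example types), the repeated-struct rule in practice:

	// OK: a slice of structs, where the inner struct carries no further
	// slice of structs.
	type Comment struct {
		Author string
		Text   string
	}
	type Post struct {
		Title    string
		Comments []Comment
	}

	// Not OK: Node contains a []Node, so it is recursively defined.
	type Node struct {
		Children []Node
	}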
The Get and Put functions load and save an entity's contents. An entity's
contents are typically represented by a struct pointer.
Example code:
type Entity struct {
Value string
}
func handle(w http.ResponseWriter, r *http.Request) {
ctx := appengine.NewContext(r)
k := datastore.NewKey(ctx, "Entity", "stringID", 0, nil)
e := new(Entity)
if err := datastore.Get(ctx, k, e); err != nil {
http.Error(w, err.Error(), 500)
return
}
old := e.Value
e.Value = r.URL.Path
if _, err := datastore.Put(ctx, k, e); err != nil {
http.Error(w, err.Error(), 500)
return
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value)
}
GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
Delete functions. They take a []*Key instead of a *Key, and may return an
appengine.MultiError when encountering partial failure.
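For illustration (an added sketch; k1, k2 and k3 are assumed keys, Entity is the struct from the example above), a partial failure from GetMulti is reported per index:

	keys := []*datastore.Key{k1, k2, k3}
	dst := make([]Entity, len(keys))
	if err := datastore.GetMulti(ctx, keys, dst); err != nil {
		if me, ok := err.(appengine.MultiError); ok {
			for i, e := range me {
				if e == datastore.ErrNoSuchEntity {
					// keys[i] has no stored entity; dst[i] is left untouched.
				} else if e != nil {
					// some other per-key failure
				}
			}
		} else {
			// a whole-batch failure, e.g. an RPC error
		}
	}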
Properties
An entity's contents can be represented by a variety of types. These are
typically struct pointers, but can also be any type that implements the
PropertyLoadSaver interface. If using a struct pointer, you do not have to
explicitly implement the PropertyLoadSaver interface; the datastore will
automatically convert via reflection. If a struct pointer does implement that
interface then those methods will be used in preference to the default
behavior for struct pointers. Struct pointers are more strongly typed and are
easier to use; PropertyLoadSavers are more flexible.
The actual types passed do not have to match between Get and Put calls or even
across different calls to datastore. It is valid to put a *PropertyList and
get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.
Conceptually, any entity is saved as a sequence of properties, and is loaded
into the destination value on a property-by-property basis. When loading into
a struct pointer, an entity that cannot be completely represented (such as a
missing field) will result in an ErrFieldMismatch error but it is up to the
caller whether this error is fatal, recoverable or ignorable.
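For illustration (an added sketch), a caller that chooses to treat ErrFieldMismatch as recoverable:

	var e Entity
	err := datastore.Get(ctx, k, &e)
	if _, mismatch := err.(*datastore.ErrFieldMismatch); mismatch {
		// A stored property no longer fits the struct; the fields that do
		// match were still loaded, so continue with what we have.
		err = nil
	}
	if err != nil {
		// handle real failures
	}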
By default, for struct pointers, all properties are potentially indexed, and
the property name is the same as the field name (and hence must start with an
upper case letter).
Fields may have a `datastore:"name,options"` tag. The tag name is the
property name, which must be one or more valid Go identifiers joined by ".",
but may start with a lower case letter. An empty tag name means to just use the
field name. A "-" tag name means that the datastore will ignore that field.
The only valid options are "omitempty" and "noindex".
If the options include "omitempty" and the value of the field is empty, then the field will be omitted on Save.
The empty values are false, 0, any nil interface value, and any array, slice, map, or string of length zero.
Struct field values will never be empty.
If options include "noindex" then the field will not be indexed. All fields are indexed
by default. Strings or byte slices longer than 1500 bytes cannot be indexed;
fields used to store long strings and byte slices must be tagged with "noindex"
or they will cause Put operations to fail.
To use multiple options together, separate them by a comma.
The order does not matter.
If the options string is "", the comma may be omitted.
Example code:
// A and B are renamed to a and b.
// A, C and J are not indexed.
// D's tag is equivalent to having no tag at all (E).
// I is ignored entirely by the datastore.
// J has tag information for both the datastore and json packages.
type TaggedStruct struct {
A int `datastore:"a,noindex"`
B int `datastore:"b"`
C int `datastore:",noindex"`
D int `datastore:""`
E int
I int `datastore:"-"`
J int `datastore:",noindex" json:"j"`
}
Structured Properties
If the struct pointed to contains other structs, then the nested or embedded
structs are flattened. For example, given these definitions:
type Inner1 struct {
W int32
X string
}
type Inner2 struct {
Y float64
}
type Inner3 struct {
Z bool
}
type Outer struct {
A int16
I []Inner1
J Inner2
Inner3
}
then an Outer's properties would be equivalent to those of:
type OuterEquivalent struct {
A int16
IDotW []int32 `datastore:"I.W"`
IDotX []string `datastore:"I.X"`
JDotY float64 `datastore:"J.Y"`
Z bool
}
If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the
equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`.
If an outer struct is tagged "noindex" then all of its implicit flattened
fields are effectively "noindex".
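For illustration (an added sketch reusing Inner1 from above), tagging the outer field "noindex" leaves its flattened properties, here Blob.W and Blob.X, unindexed:

	type NoIndexOuter struct {
		Blob Inner1 `datastore:",noindex"`
	}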
The PropertyLoadSaver Interface
An entity's contents can also be represented by any type that implements the
PropertyLoadSaver interface. This type may be a struct pointer, but it does
not have to be. The datastore package will call Load when getting the entity's
contents, and Save when putting the entity's contents.
Possible uses include deriving non-stored fields, verifying fields, or indexing
a field only if its value is positive.
Example code:
type CustomPropsExample struct {
I, J int
// Sum is not stored, but should always be equal to I + J.
Sum int `datastore:"-"`
}
func (x *CustomPropsExample) Load(ps []datastore.Property) error {
// Load I and J as usual.
if err := datastore.LoadStruct(x, ps); err != nil {
return err
}
// Derive the Sum field.
x.Sum = x.I + x.J
return nil
}
func (x *CustomPropsExample) Save() ([]datastore.Property, error) {
// Validate the Sum field.
if x.Sum != x.I + x.J {
return nil, errors.New("CustomPropsExample has inconsistent sum")
}
// Save I and J as usual. The code below is equivalent to calling
// "return datastore.SaveStruct(x)", but is done manually for
// demonstration purposes.
return []datastore.Property{
{
Name: "I",
Value: int64(x.I),
},
{
Name: "J",
Value: int64(x.J),
},
}, nil
}
The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
arbitrary entity's contents.
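For illustration (an added sketch; the key k and an fmt import are assumed), a PropertyList can be used to inspect an entity whose schema is not known in advance:

	var pl datastore.PropertyList
	if err := datastore.Get(ctx, k, &pl); err != nil {
		// handle error
	}
	for _, p := range pl {
		fmt.Printf("%s = %v (noindex=%v, multiple=%v)\n", p.Name, p.Value, p.NoIndex, p.Multiple)
	}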
Queries
Queries retrieve entities based on their properties or key's ancestry. Running
a query yields an iterator of results: either keys or (key, entity) pairs.
Queries are re-usable and it is safe to call Query.Run from concurrent
goroutines. Iterators are not safe for concurrent use.
Queries are immutable, and are either created by calling NewQuery, or derived
from an existing query by calling a method like Filter or Order that returns a
new query value. A query is typically constructed by calling NewQuery followed
by a chain of zero or more such methods. These methods are:
- Ancestor and Filter constrain the entities returned by running a query.
- Order affects the order in which they are returned.
- Project constrains the fields returned.
- Distinct de-duplicates projected entities.
- KeysOnly makes the iterator return only keys, not (key, entity) pairs.
- Start, End, Offset and Limit define which sub-sequence of matching entities
to return. Start and End take cursors, Offset and Limit take integers. Start
and Offset affect the first result, End and Limit affect the last result.
If both Start and Offset are set, then the offset is relative to Start.
If both End and Limit are set, then the earliest constraint wins. Limit is
relative to Start+Offset, not relative to End. As a special case, a
negative limit means unlimited.
Example code:
type Widget struct {
Description string
Price int
}
func handle(w http.ResponseWriter, r *http.Request) {
ctx := appengine.NewContext(r)
q := datastore.NewQuery("Widget").
Filter("Price <", 1000).
Order("-Price")
b := new(bytes.Buffer)
for t := q.Run(ctx); ; {
var x Widget
key, err := t.Next(&x)
if err == datastore.Done {
break
}
if err != nil {
serveError(ctx, w, err)
return
}
fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x)
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
io.Copy(w, b)
}
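Cursors round-trip as opaque strings. For illustration (an added sketch continuing the Widget example; cursorStr and saveCursor are assumed), resuming and saving a position:

	if cursor, err := datastore.DecodeCursor(cursorStr); err == nil {
		q = q.Start(cursor)
	}
	t := q.Run(ctx)
	for {
		var x Widget
		if _, err := t.Next(&x); err != nil {
			break // datastore.Done at the end of the results, or a real error
		}
		// ... use x ...
	}
	if next, err := t.Cursor(); err == nil {
		saveCursor(next.String()) // persist to resume from here later
	}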
Transactions
RunInTransaction runs a function in a transaction.
Example code:
type Counter struct {
Count int
}
func inc(ctx context.Context, key *datastore.Key) (int, error) {
var x Counter
if err := datastore.Get(ctx, key, &x); err != nil && err != datastore.ErrNoSuchEntity {
return 0, err
}
x.Count++
if _, err := datastore.Put(ctx, key, &x); err != nil {
return 0, err
}
return x.Count, nil
}
func handle(w http.ResponseWriter, r *http.Request) {
ctx := appengine.NewContext(r)
var count int
err := datastore.RunInTransaction(ctx, func(ctx context.Context) error {
var err1 error
count, err1 = inc(ctx, datastore.NewKey(ctx, "Counter", "singleton", 0, nil))
return err1
}, nil)
if err != nil {
serveError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
fmt.Fprintf(w, "Count=%d", count)
}
Metadata
The datastore package provides access to some of App Engine's datastore
metadata. This metadata includes information about the entity groups,
namespaces, entity kinds, and properties in the datastore, as well as the
property representations for each property.
Example code:
func handle(w http.ResponseWriter, r *http.Request) {
// Print all the kinds in the datastore, with all the indexed
// properties (and their representations) for each.
ctx := appengine.NewContext(r)
kinds, err := datastore.Kinds(ctx)
if err != nil {
serveError(ctx, w, err)
return
}
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
for _, kind := range kinds {
fmt.Fprintf(w, "%s:\n", kind)
props, err := datastore.KindProperties(ctx, kind)
if err != nil {
fmt.Fprintln(w, "\t(unable to retrieve properties)")
continue
}
for p, rep := range props {
fmt.Fprintf(w, "\t-%s (%s)\n", p, strings.Join(rep, ", "))
}
}
}
*/
package datastore // import "google.golang.org/appengine/datastore"

vendor/google.golang.org/appengine/datastore/key.go generated vendored Normal file

@@ -0,0 +1,396 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"bytes"
"encoding/base64"
"encoding/gob"
"errors"
"fmt"
"strconv"
"strings"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/appengine/internal"
pb "google.golang.org/appengine/internal/datastore"
)
type KeyRangeCollisionError struct {
start int64
end int64
}
func (e *KeyRangeCollisionError) Error() string {
return fmt.Sprintf("datastore: Collision when attempting to allocate range [%d, %d]",
e.start, e.end)
}
type KeyRangeContentionError struct {
start int64
end int64
}
func (e *KeyRangeContentionError) Error() string {
return fmt.Sprintf("datastore: Contention when attempting to allocate range [%d, %d]",
e.start, e.end)
}
// Key represents the datastore key for a stored entity, and is immutable.
type Key struct {
kind string
stringID string
intID int64
parent *Key
appID string
namespace string
}
// Kind returns the key's kind (also known as entity type).
func (k *Key) Kind() string {
return k.kind
}
// StringID returns the key's string ID (also known as an entity name or key
// name), which may be "".
func (k *Key) StringID() string {
return k.stringID
}
// IntID returns the key's integer ID, which may be 0.
func (k *Key) IntID() int64 {
return k.intID
}
// Parent returns the key's parent key, which may be nil.
func (k *Key) Parent() *Key {
return k.parent
}
// AppID returns the key's application ID.
func (k *Key) AppID() string {
return k.appID
}
// Namespace returns the key's namespace.
func (k *Key) Namespace() string {
return k.namespace
}
// Incomplete returns whether the key does not refer to a stored entity.
// In particular, whether the key has a zero StringID and a zero IntID.
func (k *Key) Incomplete() bool {
return k.stringID == "" && k.intID == 0
}
// valid returns whether the key is valid.
func (k *Key) valid() bool {
if k == nil {
return false
}
for ; k != nil; k = k.parent {
if k.kind == "" || k.appID == "" {
return false
}
if k.stringID != "" && k.intID != 0 {
return false
}
if k.parent != nil {
if k.parent.Incomplete() {
return false
}
if k.parent.appID != k.appID || k.parent.namespace != k.namespace {
return false
}
}
}
return true
}
// Equal returns whether two keys are equal.
func (k *Key) Equal(o *Key) bool {
for k != nil && o != nil {
if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace {
return false
}
k, o = k.parent, o.parent
}
return k == o
}
// root returns the furthest ancestor of a key, which may be itself.
func (k *Key) root() *Key {
for k.parent != nil {
k = k.parent
}
return k
}
// marshal marshals the key's string representation to the buffer.
func (k *Key) marshal(b *bytes.Buffer) {
if k.parent != nil {
k.parent.marshal(b)
}
b.WriteByte('/')
b.WriteString(k.kind)
b.WriteByte(',')
if k.stringID != "" {
b.WriteString(k.stringID)
} else {
b.WriteString(strconv.FormatInt(k.intID, 10))
}
}
// String returns a string representation of the key.
func (k *Key) String() string {
if k == nil {
return ""
}
b := bytes.NewBuffer(make([]byte, 0, 512))
k.marshal(b)
return b.String()
}
type gobKey struct {
Kind string
StringID string
IntID int64
Parent *gobKey
AppID string
Namespace string
}
func keyToGobKey(k *Key) *gobKey {
if k == nil {
return nil
}
return &gobKey{
Kind: k.kind,
StringID: k.stringID,
IntID: k.intID,
Parent: keyToGobKey(k.parent),
AppID: k.appID,
Namespace: k.namespace,
}
}
func gobKeyToKey(gk *gobKey) *Key {
if gk == nil {
return nil
}
return &Key{
kind: gk.Kind,
stringID: gk.StringID,
intID: gk.IntID,
parent: gobKeyToKey(gk.Parent),
appID: gk.AppID,
namespace: gk.Namespace,
}
}
func (k *Key) GobEncode() ([]byte, error) {
buf := new(bytes.Buffer)
if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func (k *Key) GobDecode(buf []byte) error {
gk := new(gobKey)
if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
return err
}
*k = *gobKeyToKey(gk)
return nil
}
func (k *Key) MarshalJSON() ([]byte, error) {
return []byte(`"` + k.Encode() + `"`), nil
}
func (k *Key) UnmarshalJSON(buf []byte) error {
if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
return errors.New("datastore: bad JSON key")
}
k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
if err != nil {
return err
}
*k = *k2
return nil
}
// Encode returns an opaque representation of the key
// suitable for use in HTML and URLs.
// This is compatible with the Python and Java runtimes.
func (k *Key) Encode() string {
ref := keyToProto("", k)
b, err := proto.Marshal(ref)
if err != nil {
panic(err)
}
// Trailing padding is stripped.
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
// DecodeKey decodes a key from the opaque representation returned by Encode.
func DecodeKey(encoded string) (*Key, error) {
// Re-add padding.
if m := len(encoded) % 4; m != 0 {
encoded += strings.Repeat("=", 4-m)
}
b, err := base64.URLEncoding.DecodeString(encoded)
if err != nil {
return nil, err
}
ref := new(pb.Reference)
if err := proto.Unmarshal(b, ref); err != nil {
return nil, err
}
return protoToKey(ref)
}
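// exampleEncodeRoundTrip is an illustrative sketch, not part of the original
// vendored file: Encode and DecodeKey are inverses for any valid key, which
// is what makes the encoded form safe to embed in URLs and HTML.
func exampleEncodeRoundTrip(c context.Context) bool {
	k := NewKey(c, "Person", "alice", 0, nil)
	k2, err := DecodeKey(k.Encode())
	return err == nil && k.Equal(k2)
}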
// NewIncompleteKey creates a new incomplete key.
// kind cannot be empty.
func NewIncompleteKey(c context.Context, kind string, parent *Key) *Key {
return NewKey(c, kind, "", 0, parent)
}
// NewKey creates a new key.
// kind cannot be empty.
// Either one or both of stringID and intID must be zero. If both are zero,
// the key returned is incomplete.
// parent must either be a complete key or nil.
func NewKey(c context.Context, kind, stringID string, intID int64, parent *Key) *Key {
// If there's a parent key, use its namespace.
// Otherwise, use any namespace attached to the context.
var namespace string
if parent != nil {
namespace = parent.namespace
} else {
namespace = internal.NamespaceFromContext(c)
}
return &Key{
kind: kind,
stringID: stringID,
intID: intID,
parent: parent,
appID: internal.FullyQualifiedAppID(c),
namespace: namespace,
}
}
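// exampleKeyHierarchy is an illustrative sketch, not part of the original
// vendored file: a complete parent key plus an incomplete child key, the
// usual shape passed to Put when the datastore should assign the child's ID.
func exampleKeyHierarchy(c context.Context) (parent, child *Key) {
	parent = NewKey(c, "Album", "summer", 0, nil) // complete: non-zero StringID
	child = NewIncompleteKey(c, "Photo", parent)  // completed by Put
	return parent, child
}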
// AllocateIDs returns a range of n integer IDs with the given kind and parent
// combination. kind cannot be empty; parent may be nil. The IDs in the range
// returned will not be used by the datastore's automatic ID sequence generator
// and may be used with NewKey without conflict.
//
// The range is inclusive at the low end and exclusive at the high end. In
// other words, valid intIDs x satisfy low <= x && x < high.
//
// If no error is returned, low + n == high.
func AllocateIDs(c context.Context, kind string, parent *Key, n int) (low, high int64, err error) {
if kind == "" {
return 0, 0, errors.New("datastore: AllocateIDs given an empty kind")
}
if n < 0 {
return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n)
}
if n == 0 {
return 0, 0, nil
}
req := &pb.AllocateIdsRequest{
ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)),
Size: proto.Int64(int64(n)),
}
res := &pb.AllocateIdsResponse{}
if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil {
return 0, 0, err
}
// The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops)
// is inclusive at the low end and exclusive at the high end, so we add 1.
low = res.GetStart()
high = res.GetEnd() + 1
if low+int64(n) != high {
return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n)
}
return low, high, nil
}
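// exampleAllocateIDs is an illustrative sketch, not part of the original
// vendored file: it reserves n IDs and turns the half-open [low, high) range
// into complete keys that the automatic allocator will never hand out.
func exampleAllocateIDs(c context.Context, n int) ([]*Key, error) {
	low, _, err := AllocateIDs(c, "Task", nil, n)
	if err != nil {
		return nil, err
	}
	keys := make([]*Key, n)
	for i := range keys {
		keys[i] = NewKey(c, "Task", "", low+int64(i), nil)
	}
	return keys, nil
}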
// AllocateIDRange allocates a range of IDs with specific endpoints.
// The range is inclusive at both the low and high end. Once these IDs have been
// allocated, you can manually assign them to newly created entities.
//
// The Datastore's automatic ID allocator never assigns a key that has already
// been allocated (either through automatic ID allocation or through an explicit
// AllocateIDs call). As a result, entities written to the given key range will
// never be overwritten. However, writing entities with manually assigned keys in
// this range may overwrite existing entities (or new entities written by a separate
// request), depending on the error returned.
//
// Use this only if you have an existing numeric ID range that you want to reserve
// (for example, bulk loading entities that already have IDs). If you don't care
// about which IDs you receive, use AllocateIDs instead.
//
// AllocateIDRange returns nil if the range is successfully allocated. If one or more
// entities with an ID in the given range already exist, it returns a KeyRangeCollisionError.
// If the Datastore has already cached IDs in this range (e.g. from a previous call to
// AllocateIDRange), it returns a KeyRangeContentionError. Errors of other types indicate
// problems with arguments or an error returned directly from the Datastore.
func AllocateIDRange(c context.Context, kind string, parent *Key, start, end int64) (err error) {
if kind == "" {
return errors.New("datastore: AllocateIDRange given an empty kind")
}
if start < 1 || end < 1 {
return errors.New("datastore: AllocateIDRange start and end must both be greater than 0")
}
if start > end {
return errors.New("datastore: AllocateIDRange start must be before end")
}
req := &pb.AllocateIdsRequest{
ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)),
Max: proto.Int64(end),
}
res := &pb.AllocateIdsResponse{}
if err := internal.Call(c, "datastore_v3", "AllocateIds", req, res); err != nil {
return err
}
// Check for collisions, i.e. existing entities with IDs in this range.
// We could do this before the allocation, but we'd still have to do it
// afterward as well to catch the race condition where an entity is inserted
// after that initial check but before the allocation. Skip the up-front check
// and just do it once.
q := NewQuery(kind).Filter("__key__ >=", NewKey(c, kind, "", start, parent)).
Filter("__key__ <=", NewKey(c, kind, "", end, parent)).KeysOnly().Limit(1)
keys, err := q.GetAll(c, nil)
if err != nil {
return err
}
if len(keys) != 0 {
return &KeyRangeCollisionError{start: start, end: end}
}
// Check for a race condition, i.e. cases where the datastore may have
// cached ID batches that contain IDs in this range.
if start < res.GetStart() {
return &KeyRangeContentionError{start: start, end: end}
}
return nil
}
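// exampleAllocateIDRange is an illustrative sketch, not part of the original
// vendored file: it reserves an explicit range and distinguishes the two
// documented error types from ordinary failures.
func exampleAllocateIDRange(c context.Context, start, end int64) string {
	err := AllocateIDRange(c, "Book", nil, start, end)
	switch err.(type) {
	case nil:
		return "range reserved"
	case *KeyRangeCollisionError:
		return "entities already exist in this range"
	case *KeyRangeContentionError:
		return "the datastore has cached IDs inside this range"
	default:
		return "bad arguments or RPC failure: " + err.Error()
	}
}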


@@ -0,0 +1,204 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"bytes"
"encoding/gob"
"encoding/json"
"testing"
"golang.org/x/net/context"
"google.golang.org/appengine/internal"
)
func TestKeyEncoding(t *testing.T) {
testCases := []struct {
desc string
key *Key
exp string
}{
{
desc: "A simple key with an int ID",
key: &Key{
kind: "Person",
intID: 1,
appID: "glibrary",
},
exp: "aghnbGlicmFyeXIMCxIGUGVyc29uGAEM",
},
{
desc: "A simple key with a string ID",
key: &Key{
kind: "Graph",
stringID: "graph:7-day-active",
appID: "glibrary",
},
exp: "aghnbGlicmFyeXIdCxIFR3JhcGgiEmdyYXBoOjctZGF5LWFjdGl2ZQw",
},
{
desc: "A key with a parent",
key: &Key{
kind: "WordIndex",
intID: 1033,
parent: &Key{
kind: "WordIndex",
intID: 1020032,
appID: "glibrary",
},
appID: "glibrary",
},
exp: "aghnbGlicmFyeXIhCxIJV29yZEluZGV4GIChPgwLEglXb3JkSW5kZXgYiQgM",
},
}
for _, tc := range testCases {
enc := tc.key.Encode()
if enc != tc.exp {
t.Errorf("%s: got %q, want %q", tc.desc, enc, tc.exp)
}
key, err := DecodeKey(tc.exp)
if err != nil {
t.Errorf("%s: failed decoding key: %v", tc.desc, err)
continue
}
if !key.Equal(tc.key) {
t.Errorf("%s: decoded key %v, want %v", tc.desc, key, tc.key)
}
}
}
func TestKeyGob(t *testing.T) {
k := &Key{
kind: "Gopher",
intID: 3,
parent: &Key{
kind: "Mom",
stringID: "narwhal",
appID: "gopher-con",
},
appID: "gopher-con",
}
buf := new(bytes.Buffer)
if err := gob.NewEncoder(buf).Encode(k); err != nil {
t.Fatalf("gob encode failed: %v", err)
}
k2 := new(Key)
if err := gob.NewDecoder(buf).Decode(k2); err != nil {
t.Fatalf("gob decode failed: %v", err)
}
if !k2.Equal(k) {
t.Errorf("gob round trip of %v produced %v", k, k2)
}
}
func TestNilKeyGob(t *testing.T) {
type S struct {
Key *Key
}
s1 := new(S)
buf := new(bytes.Buffer)
if err := gob.NewEncoder(buf).Encode(s1); err != nil {
t.Fatalf("gob encode failed: %v", err)
}
s2 := new(S)
if err := gob.NewDecoder(buf).Decode(s2); err != nil {
t.Fatalf("gob decode failed: %v", err)
}
if s2.Key != nil {
t.Errorf("gob round trip of nil key produced %v", s2.Key)
}
}
func TestKeyJSON(t *testing.T) {
k := &Key{
kind: "Gopher",
intID: 2,
parent: &Key{
kind: "Mom",
stringID: "narwhal",
appID: "gopher-con",
},
appID: "gopher-con",
}
exp := `"` + k.Encode() + `"`
buf, err := json.Marshal(k)
if err != nil {
t.Fatalf("json.Marshal failed: %v", err)
}
if s := string(buf); s != exp {
t.Errorf("JSON encoding of key %v: got %q, want %q", k, s, exp)
}
k2 := new(Key)
if err := json.Unmarshal(buf, k2); err != nil {
t.Fatalf("json.Unmarshal failed: %v", err)
}
if !k2.Equal(k) {
t.Errorf("JSON round trip of %v produced %v", k, k2)
}
}
func TestNilKeyJSON(t *testing.T) {
type S struct {
Key *Key
}
s1 := new(S)
buf, err := json.Marshal(s1)
if err != nil {
t.Fatalf("json.Marshal failed: %v", err)
}
s2 := new(S)
if err := json.Unmarshal(buf, s2); err != nil {
t.Fatalf("json.Unmarshal failed: %v", err)
}
if s2.Key != nil {
t.Errorf("JSON round trip of nil key produced %v", s2.Key)
}
}
func TestIncompleteKeyWithParent(t *testing.T) {
c := internal.WithAppIDOverride(context.Background(), "s~some-app")
// fadduh is a complete key.
fadduh := NewKey(c, "Person", "", 1, nil)
if fadduh.Incomplete() {
t.Fatalf("fadduh is incomplete")
}
// robert is an incomplete key with fadduh as a parent.
robert := NewIncompleteKey(c, "Person", fadduh)
if !robert.Incomplete() {
t.Fatalf("robert is complete")
}
// Both should be valid keys.
if !fadduh.valid() {
t.Errorf("fadduh is invalid: %v", fadduh)
}
if !robert.valid() {
t.Errorf("robert is invalid: %v", robert)
}
}
func TestNamespace(t *testing.T) {
key := &Key{
kind: "Person",
intID: 1,
appID: "s~some-app",
namespace: "mynamespace",
}
if g, w := key.Namespace(), "mynamespace"; g != w {
t.Errorf("key.Namespace() = %q, want %q", g, w)
}
}

vendor/google.golang.org/appengine/datastore/load.go generated vendored Normal file

@@ -0,0 +1,429 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/golang/protobuf/proto"
"google.golang.org/appengine"
pb "google.golang.org/appengine/internal/datastore"
)
var (
typeOfBlobKey = reflect.TypeOf(appengine.BlobKey(""))
typeOfByteSlice = reflect.TypeOf([]byte(nil))
typeOfByteString = reflect.TypeOf(ByteString(nil))
typeOfGeoPoint = reflect.TypeOf(appengine.GeoPoint{})
typeOfTime = reflect.TypeOf(time.Time{})
typeOfKeyPtr = reflect.TypeOf(&Key{})
typeOfEntityPtr = reflect.TypeOf(&Entity{})
)
// typeMismatchReason returns a string explaining why the property p could not
// be stored in an entity field of type v.Type().
func typeMismatchReason(pValue interface{}, v reflect.Value) string {
entityType := "empty"
switch pValue.(type) {
case int64:
entityType = "int"
case bool:
entityType = "bool"
case string:
entityType = "string"
case float64:
entityType = "float"
case *Key:
entityType = "*datastore.Key"
case time.Time:
entityType = "time.Time"
case appengine.BlobKey:
entityType = "appengine.BlobKey"
case appengine.GeoPoint:
entityType = "appengine.GeoPoint"
case ByteString:
entityType = "datastore.ByteString"
case []byte:
entityType = "[]byte"
}
return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
}
type propertyLoader struct {
// m holds the number of times a substruct field like "Foo.Bar.Baz" has
// been seen so far. The map is constructed lazily.
m map[string]int
}
func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string {
var v reflect.Value
var sliceIndex int
name := p.Name
// If name ends with a '.', the last field is anonymous.
// In this case, strings.Split will give us "" as the
// last element of our fields slice, which will match the ""
// field name in the substruct codec.
fields := strings.Split(name, ".")
for len(fields) > 0 {
var decoder fieldCodec
var ok bool
// Cut off the last field (delimited by ".") and find its parent
// in the codec.
// eg. for name "A.B.C.D", split off "A.B.C" and try to
// find a field in the codec with this name.
// Loop again with "A.B", etc.
for i := len(fields); i > 0; i-- {
parent := strings.Join(fields[:i], ".")
decoder, ok = codec.fields[parent]
if ok {
fields = fields[i:]
break
}
}
// If we never found a matching field in the codec, return
// error message.
if !ok {
return "no such struct field"
}
v = initField(structValue, decoder.path)
if !v.IsValid() {
return "no such struct field"
}
if !v.CanSet() {
return "cannot set struct field"
}
if decoder.structCodec != nil {
codec = decoder.structCodec
structValue = v
}
if v.Kind() == reflect.Slice && v.Type() != typeOfByteSlice {
if l.m == nil {
l.m = make(map[string]int)
}
sliceIndex = l.m[p.Name]
l.m[p.Name] = sliceIndex + 1
for v.Len() <= sliceIndex {
v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
}
structValue = v.Index(sliceIndex)
requireSlice = false
}
}
var slice reflect.Value
if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
slice = v
v = reflect.New(v.Type().Elem()).Elem()
} else if requireSlice {
return "multiple-valued property requires a slice field type"
}
// Convert indexValues to a Go value with a meaning derived from the
// destination type.
pValue := p.Value
if iv, ok := pValue.(indexValue); ok {
meaning := pb.Property_NO_MEANING
switch v.Type() {
case typeOfBlobKey:
meaning = pb.Property_BLOBKEY
case typeOfByteSlice:
meaning = pb.Property_BLOB
case typeOfByteString:
meaning = pb.Property_BYTESTRING
case typeOfGeoPoint:
meaning = pb.Property_GEORSS_POINT
case typeOfTime:
meaning = pb.Property_GD_WHEN
case typeOfEntityPtr:
meaning = pb.Property_ENTITY_PROTO
}
var err error
pValue, err = propValue(iv.value, meaning)
if err != nil {
return err.Error()
}
}
if errReason := setVal(v, pValue); errReason != "" {
// Set the slice back to its zero value.
if slice.IsValid() {
slice.Set(reflect.Zero(slice.Type()))
}
return errReason
}
if slice.IsValid() {
slice.Index(sliceIndex).Set(v)
}
return ""
}
// setVal sets v to the value pValue.
func setVal(v reflect.Value, pValue interface{}) string {
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
x, ok := pValue.(int64)
if !ok && pValue != nil {
return typeMismatchReason(pValue, v)
}
if v.OverflowInt(x) {
return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
}
v.SetInt(x)
case reflect.Bool:
x, ok := pValue.(bool)
if !ok && pValue != nil {
return typeMismatchReason(pValue, v)
}
v.SetBool(x)
case reflect.String:
switch x := pValue.(type) {
case appengine.BlobKey:
v.SetString(string(x))
case ByteString:
v.SetString(string(x))
case string:
v.SetString(x)
default:
if pValue != nil {
return typeMismatchReason(pValue, v)
}
}
case reflect.Float32, reflect.Float64:
x, ok := pValue.(float64)
if !ok && pValue != nil {
return typeMismatchReason(pValue, v)
}
if v.OverflowFloat(x) {
return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
}
v.SetFloat(x)
case reflect.Ptr:
x, ok := pValue.(*Key)
if !ok && pValue != nil {
return typeMismatchReason(pValue, v)
}
if _, ok := v.Interface().(*Key); !ok {
return typeMismatchReason(pValue, v)
}
v.Set(reflect.ValueOf(x))
case reflect.Struct:
switch v.Type() {
case typeOfTime:
x, ok := pValue.(time.Time)
if !ok && pValue != nil {
return typeMismatchReason(pValue, v)
}
v.Set(reflect.ValueOf(x))
case typeOfGeoPoint:
x, ok := pValue.(appengine.GeoPoint)
if !ok && pValue != nil {
return typeMismatchReason(pValue, v)
}
v.Set(reflect.ValueOf(x))
default:
ent, ok := pValue.(*Entity)
if !ok {
return typeMismatchReason(pValue, v)
}
// Recursively load nested struct
pls, err := newStructPLS(v.Addr().Interface())
if err != nil {
return err.Error()
}
// if ent has a Key value and our struct has a Key field,
// load the Entity's Key value into the Key field on the struct.
if ent.Key != nil && pls.codec.keyField != -1 {
pls.v.Field(pls.codec.keyField).Set(reflect.ValueOf(ent.Key))
}
err = pls.Load(ent.Properties)
if err != nil {
return err.Error()
}
}
case reflect.Slice:
x, ok := pValue.([]byte)
if !ok {
if y, yok := pValue.(ByteString); yok {
x, ok = []byte(y), true
}
}
if !ok && pValue != nil {
return typeMismatchReason(pValue, v)
}
if v.Type().Elem().Kind() != reflect.Uint8 {
return typeMismatchReason(pValue, v)
}
v.SetBytes(x)
default:
return typeMismatchReason(pValue, v)
}
return ""
}
// initField is similar to reflect's Value.FieldByIndex, in that it
// returns the nested struct field corresponding to index, but it
// initialises any nil pointers encountered when traversing the structure.
func initField(val reflect.Value, index []int) reflect.Value {
for _, i := range index[:len(index)-1] {
val = val.Field(i)
if val.Kind() == reflect.Ptr {
if val.IsNil() {
val.Set(reflect.New(val.Type().Elem()))
}
val = val.Elem()
}
}
return val.Field(index[len(index)-1])
}
// loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer.
func loadEntity(dst interface{}, src *pb.EntityProto) (err error) {
ent, err := protoToEntity(src)
if err != nil {
return err
}
if e, ok := dst.(PropertyLoadSaver); ok {
return e.Load(ent.Properties)
}
return LoadStruct(dst, ent.Properties)
}
func (s structPLS) Load(props []Property) error {
var fieldName, reason string
var l propertyLoader
for _, p := range props {
if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" {
// We don't return early, as we try to load as many properties as possible.
// It is valid to load an entity into a struct that cannot fully represent it.
// That case returns an error, but the caller is free to ignore it.
fieldName, reason = p.Name, errStr
}
}
if reason != "" {
return &ErrFieldMismatch{
StructType: s.v.Type(),
FieldName: fieldName,
Reason: reason,
}
}
return nil
}
func protoToEntity(src *pb.EntityProto) (*Entity, error) {
props, rawProps := src.Property, src.RawProperty
outProps := make([]Property, 0, len(props)+len(rawProps))
for {
var (
x *pb.Property
noIndex bool
)
if len(props) > 0 {
x, props = props[0], props[1:]
} else if len(rawProps) > 0 {
x, rawProps = rawProps[0], rawProps[1:]
noIndex = true
} else {
break
}
var value interface{}
if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE {
value = indexValue{x.Value}
} else {
var err error
value, err = propValue(x.Value, x.GetMeaning())
if err != nil {
return nil, err
}
}
outProps = append(outProps, Property{
Name: x.GetName(),
Value: value,
NoIndex: noIndex,
Multiple: x.GetMultiple(),
})
}
var key *Key
if src.Key != nil {
// Ignore any error, since nested entity values
// are allowed to have an invalid key.
key, _ = protoToKey(src.Key)
}
return &Entity{key, outProps}, nil
}
// propValue returns a Go value that combines the raw PropertyValue with a
// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time.
func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) {
switch {
case v.Int64Value != nil:
if m == pb.Property_GD_WHEN {
return fromUnixMicro(*v.Int64Value), nil
} else {
return *v.Int64Value, nil
}
case v.BooleanValue != nil:
return *v.BooleanValue, nil
case v.StringValue != nil:
if m == pb.Property_BLOB {
return []byte(*v.StringValue), nil
} else if m == pb.Property_BLOBKEY {
return appengine.BlobKey(*v.StringValue), nil
} else if m == pb.Property_BYTESTRING {
return ByteString(*v.StringValue), nil
} else if m == pb.Property_ENTITY_PROTO {
var ent pb.EntityProto
err := proto.Unmarshal([]byte(*v.StringValue), &ent)
if err != nil {
return nil, err
}
return protoToEntity(&ent)
} else {
return *v.StringValue, nil
}
case v.DoubleValue != nil:
return *v.DoubleValue, nil
case v.Referencevalue != nil:
key, err := referenceValueToKey(v.Referencevalue)
if err != nil {
return nil, err
}
return key, nil
case v.Pointvalue != nil:
// NOTE: Strangely, latitude maps to X, longitude to Y.
return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil
}
return nil, nil
}
// indexValue is a Property value that is created when entities are loaded from
// an index, such as from a projection query.
//
// Such Property values do not contain all of the metadata required to be
// faithfully represented as a Go value, and are instead represented as an
// opaque indexValue. Load the properties into a concrete struct type (e.g. by
// passing a struct pointer to Iterator.Next) to reconstruct actual Go values
// of type int, string, time.Time, etc.
type indexValue struct {
value *pb.PropertyValue
}
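The indexValue mechanism above is what makes projection queries usable: the raw indexed value is carried opaquely until it is loaded into a typed struct field. An added sketch follows (the Widget type and the imports are assumptions, not part of the vendored code):

import (
	"golang.org/x/net/context"
	"google.golang.org/appengine/datastore"
)

// Widget is an assumed type; Price arrives from the projection as an
// indexValue and is reconstructed into an int when loaded by Iterator.Next.
type Widget struct {
	Price int
}

func widgetPrices(ctx context.Context) ([]int, error) {
	var prices []int
	it := datastore.NewQuery("Widget").Project("Price").Run(ctx)
	for {
		var w Widget
		if _, err := it.Next(&w); err == datastore.Done {
			break
		} else if err != nil {
			return nil, err
		}
		prices = append(prices, w.Price)
	}
	return prices, nil
}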


@@ -0,0 +1,656 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"reflect"
"testing"
proto "github.com/golang/protobuf/proto"
pb "google.golang.org/appengine/internal/datastore"
)
type Simple struct {
I int64
}
type SimpleWithTag struct {
I int64 `datastore:"II"`
}
type NestedSimpleWithTag struct {
A SimpleWithTag `datastore:"AA"`
}
type NestedSliceOfSimple struct {
A []Simple
}
type SimpleTwoFields struct {
S string
SS string
}
type NestedSimpleAnonymous struct {
Simple
X string
}
type NestedSimple struct {
A Simple
I int64
}
type NestedSimple1 struct {
A Simple
X string
}
type NestedSimple2X struct {
AA NestedSimple
A SimpleTwoFields
S string
}
type BDotB struct {
B string `datastore:"B.B"`
}
type ABDotB struct {
A BDotB
}
type MultiAnonymous struct {
Simple
SimpleTwoFields
X string
}
var (
// these values need to be addressable
testString2 = "two"
testString3 = "three"
testInt64 = int64(2)
fieldNameI = "I"
fieldNameX = "X"
fieldNameS = "S"
fieldNameSS = "SS"
fieldNameADotI = "A.I"
fieldNameAADotII = "AA.II"
fieldNameADotBDotB = "A.B.B"
)
func TestLoadEntityNestedLegacy(t *testing.T) {
testCases := []struct {
desc string
src *pb.EntityProto
want interface{}
}{
{
"nested",
&pb.EntityProto{
Key: keyToProto("some-app-id", testKey0),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameX,
Value: &pb.PropertyValue{
StringValue: &testString2,
},
},
&pb.Property{
Name: &fieldNameADotI,
Value: &pb.PropertyValue{
Int64Value: &testInt64,
},
},
},
},
&NestedSimple1{
A: Simple{I: testInt64},
X: testString2,
},
},
{
"nested with tag",
&pb.EntityProto{
Key: keyToProto("some-app-id", testKey0),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameAADotII,
Value: &pb.PropertyValue{
Int64Value: &testInt64,
},
},
},
},
&NestedSimpleWithTag{
A: SimpleWithTag{I: testInt64},
},
},
{
"nested with anonymous struct field",
&pb.EntityProto{
Key: keyToProto("some-app-id", testKey0),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameX,
Value: &pb.PropertyValue{
StringValue: &testString2,
},
},
&pb.Property{
Name: &fieldNameI,
Value: &pb.PropertyValue{
Int64Value: &testInt64,
},
},
},
},
&NestedSimpleAnonymous{
Simple: Simple{I: testInt64},
X: testString2,
},
},
{
"nested with dotted field tag",
&pb.EntityProto{
Key: keyToProto("some-app-id", testKey0),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameADotBDotB,
Value: &pb.PropertyValue{
StringValue: &testString2,
},
},
},
},
&ABDotB{
A: BDotB{
B: testString2,
},
},
},
{
"nested with dotted field tag",
&pb.EntityProto{
Key: keyToProto("some-app-id", testKey0),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameI,
Value: &pb.PropertyValue{
Int64Value: &testInt64,
},
},
&pb.Property{
Name: &fieldNameS,
Value: &pb.PropertyValue{
StringValue: &testString2,
},
},
&pb.Property{
Name: &fieldNameSS,
Value: &pb.PropertyValue{
StringValue: &testString3,
},
},
&pb.Property{
Name: &fieldNameX,
Value: &pb.PropertyValue{
StringValue: &testString3,
},
},
},
},
&MultiAnonymous{
Simple: Simple{I: testInt64},
SimpleTwoFields: SimpleTwoFields{S: "two", SS: "three"},
X: "three",
},
},
}
for _, tc := range testCases {
dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface()
err := loadEntity(dst, tc.src)
if err != nil {
t.Errorf("loadEntity: %s: %v", tc.desc, err)
continue
}
if !reflect.DeepEqual(tc.want, dst) {
t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
}
}
}
type WithKey struct {
X string
I int64
K *Key `datastore:"__key__"`
}
type NestedWithKey struct {
N WithKey
Y string
}
var (
incompleteKey = newKey("", nil)
invalidKey = newKey("s", incompleteKey)
// these values need to be addressable
fieldNameA = "A"
fieldNameK = "K"
fieldNameN = "N"
fieldNameY = "Y"
fieldNameAA = "AA"
fieldNameII = "II"
fieldNameBDotB = "B.B"
entityProtoMeaning = pb.Property_ENTITY_PROTO
TRUE = true
FALSE = false
)
var (
simpleEntityProto, nestedSimpleEntityProto,
simpleTwoFieldsEntityProto, simpleWithTagEntityProto,
bDotBEntityProto, withKeyEntityProto string
)
func init() {
// simpleEntityProto corresponds to:
// Simple{I: testInt64}
simpleEntityProtob, err := proto.Marshal(&pb.EntityProto{
Key: keyToProto("", incompleteKey),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameI,
Value: &pb.PropertyValue{
Int64Value: &testInt64,
},
Multiple: &FALSE,
},
},
EntityGroup: &pb.Path{},
})
if err != nil {
panic(err)
}
simpleEntityProto = string(simpleEntityProtob)
// nestedSimpleEntityProto corresponds to:
// NestedSimple{
// A: Simple{I: testInt64},
// I: testInt64,
// }
nestedSimpleEntityProtob, err := proto.Marshal(&pb.EntityProto{
Key: keyToProto("", incompleteKey),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameA,
Meaning: &entityProtoMeaning,
Value: &pb.PropertyValue{
StringValue: &simpleEntityProto,
},
Multiple: &FALSE,
},
&pb.Property{
Name: &fieldNameI,
Meaning: &entityProtoMeaning,
Value: &pb.PropertyValue{
Int64Value: &testInt64,
},
Multiple: &FALSE,
},
},
EntityGroup: &pb.Path{},
})
if err != nil {
panic(err)
}
nestedSimpleEntityProto = string(nestedSimpleEntityProtob)
// simpleTwoFieldsEntityProto corresponds to:
// SimpleTwoFields{S: testString2, SS: testString3}
simpleTwoFieldsEntityProtob, err := proto.Marshal(&pb.EntityProto{
Key: keyToProto("", incompleteKey),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameS,
Value: &pb.PropertyValue{
StringValue: &testString2,
},
Multiple: &FALSE,
},
&pb.Property{
Name: &fieldNameSS,
Value: &pb.PropertyValue{
StringValue: &testString3,
},
Multiple: &FALSE,
},
},
EntityGroup: &pb.Path{},
})
if err != nil {
panic(err)
}
simpleTwoFieldsEntityProto = string(simpleTwoFieldsEntityProtob)
// simpleWithTagEntityProto corresponds to:
// SimpleWithTag{I: testInt64}
simpleWithTagEntityProtob, err := proto.Marshal(&pb.EntityProto{
Key: keyToProto("", incompleteKey),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameII,
Value: &pb.PropertyValue{
Int64Value: &testInt64,
},
Multiple: &FALSE,
},
},
EntityGroup: &pb.Path{},
})
if err != nil {
panic(err)
}
simpleWithTagEntityProto = string(simpleWithTagEntityProtob)
// bDotBEntityProto corresponds to:
// BDotB{
// B: testString2,
// }
bDotBEntityProtob, err := proto.Marshal(&pb.EntityProto{
Key: keyToProto("", incompleteKey),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameBDotB,
Value: &pb.PropertyValue{
StringValue: &testString2,
},
Multiple: &FALSE,
},
},
EntityGroup: &pb.Path{},
})
if err != nil {
panic(err)
}
bDotBEntityProto = string(bDotBEntityProtob)
// withKeyEntityProto corresponds to:
// WithKey{
// X: testString3,
// I: testInt64,
// K: testKey1a,
// }
withKeyEntityProtob, err := proto.Marshal(&pb.EntityProto{
Key: keyToProto("", testKey1a),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameX,
Value: &pb.PropertyValue{
StringValue: &testString3,
},
Multiple: &FALSE,
},
&pb.Property{
Name: &fieldNameI,
Value: &pb.PropertyValue{
Int64Value: &testInt64,
},
Multiple: &FALSE,
},
},
EntityGroup: &pb.Path{},
})
if err != nil {
panic(err)
}
withKeyEntityProto = string(withKeyEntityProtob)
}
func TestLoadEntityNested(t *testing.T) {
testCases := []struct {
desc string
src *pb.EntityProto
want interface{}
}{
{
"nested basic",
&pb.EntityProto{
Key: keyToProto("some-app-id", testKey0),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameA,
Meaning: &entityProtoMeaning,
Value: &pb.PropertyValue{
StringValue: &simpleEntityProto,
},
},
&pb.Property{
Name: &fieldNameI,
Value: &pb.PropertyValue{
Int64Value: &testInt64,
},
},
},
},
&NestedSimple{
A: Simple{I: 2},
I: 2,
},
},
{
"nested with struct tags",
&pb.EntityProto{
Key: keyToProto("some-app-id", testKey0),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameAA,
Meaning: &entityProtoMeaning,
Value: &pb.PropertyValue{
StringValue: &simpleWithTagEntityProto,
},
},
},
},
&NestedSimpleWithTag{
A: SimpleWithTag{I: testInt64},
},
},
{
"nested 2x",
&pb.EntityProto{
Key: keyToProto("some-app-id", testKey0),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameAA,
Meaning: &entityProtoMeaning,
Value: &pb.PropertyValue{
StringValue: &nestedSimpleEntityProto,
},
},
&pb.Property{
Name: &fieldNameA,
Meaning: &entityProtoMeaning,
Value: &pb.PropertyValue{
StringValue: &simpleTwoFieldsEntityProto,
},
},
&pb.Property{
Name: &fieldNameS,
Value: &pb.PropertyValue{
StringValue: &testString3,
},
},
},
},
&NestedSimple2X{
AA: NestedSimple{
A: Simple{I: testInt64},
I: testInt64,
},
A: SimpleTwoFields{S: testString2, SS: testString3},
S: testString3,
},
},
{
"nested anonymous",
&pb.EntityProto{
Key: keyToProto("some-app-id", testKey0),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameI,
Value: &pb.PropertyValue{
Int64Value: &testInt64,
},
},
&pb.Property{
Name: &fieldNameX,
Value: &pb.PropertyValue{
StringValue: &testString2,
},
},
},
},
&NestedSimpleAnonymous{
Simple: Simple{I: testInt64},
X: testString2,
},
},
{
"nested simple with slice",
&pb.EntityProto{
Key: keyToProto("some-app-id", testKey0),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameA,
Meaning: &entityProtoMeaning,
Multiple: &TRUE,
Value: &pb.PropertyValue{
StringValue: &simpleEntityProto,
},
},
&pb.Property{
Name: &fieldNameA,
Meaning: &entityProtoMeaning,
Multiple: &TRUE,
Value: &pb.PropertyValue{
StringValue: &simpleEntityProto,
},
},
},
},
&NestedSliceOfSimple{
A: []Simple{Simple{I: testInt64}, Simple{I: testInt64}},
},
},
{
"nested with multiple anonymous fields",
&pb.EntityProto{
Key: keyToProto("some-app-id", testKey0),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameI,
Value: &pb.PropertyValue{
Int64Value: &testInt64,
},
},
&pb.Property{
Name: &fieldNameS,
Value: &pb.PropertyValue{
StringValue: &testString2,
},
},
&pb.Property{
Name: &fieldNameSS,
Value: &pb.PropertyValue{
StringValue: &testString3,
},
},
&pb.Property{
Name: &fieldNameX,
Value: &pb.PropertyValue{
StringValue: &testString2,
},
},
},
},
&MultiAnonymous{
Simple: Simple{I: testInt64},
SimpleTwoFields: SimpleTwoFields{S: testString2, SS: testString3},
X: testString2,
},
},
{
"nested with dotted field tag",
&pb.EntityProto{
Key: keyToProto("some-app-id", testKey0),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameA,
Meaning: &entityProtoMeaning,
Value: &pb.PropertyValue{
StringValue: &bDotBEntityProto,
},
},
},
},
&ABDotB{
A: BDotB{
B: testString2,
},
},
},
{
"nested entity with key",
&pb.EntityProto{
Key: keyToProto("some-app-id", testKey0),
Property: []*pb.Property{
&pb.Property{
Name: &fieldNameY,
Value: &pb.PropertyValue{
StringValue: &testString2,
},
},
&pb.Property{
Name: &fieldNameN,
Meaning: &entityProtoMeaning,
Value: &pb.PropertyValue{
StringValue: &withKeyEntityProto,
},
},
},
},
&NestedWithKey{
Y: testString2,
N: WithKey{
X: testString3,
I: testInt64,
K: testKey1a,
},
},
},
}
for _, tc := range testCases {
dst := reflect.New(reflect.TypeOf(tc.want).Elem()).Interface()
err := loadEntity(dst, tc.src)
if err != nil {
t.Errorf("loadEntity: %s: %v", tc.desc, err)
continue
}
if !reflect.DeepEqual(tc.want, dst) {
t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
}
}
}


@ -0,0 +1,78 @@
// Copyright 2016 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import "golang.org/x/net/context"
// Datastore kinds for the metadata entities.
const (
namespaceKind = "__namespace__"
kindKind = "__kind__"
propertyKind = "__property__"
)
// Namespaces returns all the datastore namespaces.
func Namespaces(ctx context.Context) ([]string, error) {
// TODO(djd): Support range queries.
q := NewQuery(namespaceKind).KeysOnly()
keys, err := q.GetAll(ctx, nil)
if err != nil {
return nil, err
}
// The empty namespace key uses a numeric ID (==1), but luckily
// the string ID defaults to "" for numeric IDs anyway.
return keyNames(keys), nil
}
// Kinds returns the names of all the kinds in the current namespace.
func Kinds(ctx context.Context) ([]string, error) {
// TODO(djd): Support range queries.
q := NewQuery(kindKind).KeysOnly()
keys, err := q.GetAll(ctx, nil)
if err != nil {
return nil, err
}
return keyNames(keys), nil
}
// keyNames returns a slice of the provided keys' names (string IDs).
func keyNames(keys []*Key) []string {
n := make([]string, 0, len(keys))
for _, k := range keys {
n = append(n, k.StringID())
}
return n
}
// KindProperties returns all the indexed properties for the given kind.
// The properties are returned as a map of property names to a slice of the
// representation types. The representation types for the supported Go property
// types are:
// "INT64": signed integers and time.Time
// "DOUBLE": float32 and float64
// "BOOLEAN": bool
// "STRING": string, []byte and ByteString
// "POINT": appengine.GeoPoint
// "REFERENCE": *Key
// "USER": (not used in the Go runtime)
func KindProperties(ctx context.Context, kind string) (map[string][]string, error) {
// TODO(djd): Support range queries.
kindKey := NewKey(ctx, kindKind, kind, 0, nil)
q := NewQuery(propertyKind).Ancestor(kindKey)
propMap := map[string][]string{}
props := []struct {
Repr []string `datastore:"property_representation"`
}{}
keys, err := q.GetAll(ctx, &props)
if err != nil {
return nil, err
}
for i, p := range props {
propMap[keys[i].StringID()] = p.Repr
}
return propMap, nil
}
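// Illustrative sketch (not part of the vendored file above): how the metadata
// helpers are typically called. It is written as if inside this package so
// Kinds and KindProperties can be used unqualified; application code would
// call them as datastore.Kinds etc. with a context obtained from an incoming
// request. Namespaces(ctx) lists the datastore namespaces in the same way.
func exampleMetadata(ctx context.Context) (map[string]map[string][]string, error) {
	// Map of kind -> property name -> representation types ("INT64", "STRING", ...).
	out := map[string]map[string][]string{}
	kinds, err := Kinds(ctx)
	if err != nil {
		return nil, err
	}
	for _, kind := range kinds {
		props, err := KindProperties(ctx, kind)
		if err != nil {
			return nil, err
		}
		out[kind] = props
	}
	return out, nil
}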

vendor/google.golang.org/appengine/datastore/prop.go generated vendored Normal file

@ -0,0 +1,330 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"fmt"
"reflect"
"strings"
"sync"
"unicode"
)
// Entities with more than this many indexed properties will not be saved.
const maxIndexedProperties = 20000
// []byte fields more than 1 megabyte long will not be loaded or saved.
const maxBlobLen = 1 << 20
// Property is a name/value pair plus some metadata. A datastore entity's
// contents are loaded and saved as a sequence of Properties. An entity can
// have multiple Properties with the same name, provided that p.Multiple is
// true on all of that entity's Properties with that name.
type Property struct {
// Name is the property name.
Name string
// Value is the property value. The valid types are:
// - int64
// - bool
// - string
// - float64
// - ByteString
// - *Key
// - time.Time
// - appengine.BlobKey
// - appengine.GeoPoint
// - []byte (up to 1 megabyte in length)
// - *Entity (representing a nested struct)
// This set is smaller than the set of valid struct field types that the
// datastore can load and save. A Property Value cannot be a slice (apart
// from []byte); use multiple Properties instead. Also, a Value's type
// must be explicitly on the list above; it is not sufficient for the
// underlying type to be on that list. For example, a Value of "type
// myInt64 int64" is invalid. Smaller-width integers and floats are also
// invalid. Again, this is more restrictive than the set of valid struct
// field types.
//
// A Value will have an opaque type when loading entities from an index,
// such as via a projection query. Load entities into a struct instead
// of a PropertyLoadSaver when using a projection query.
//
// A Value may also be the nil interface value; this is equivalent to
// Python's None but not directly representable by a Go struct. Loading
// a nil-valued property into a struct will set that field to the zero
// value.
Value interface{}
// NoIndex is whether the datastore cannot index this property.
NoIndex bool
// Multiple is whether the entity can have multiple properties with
// the same name. Even if a particular instance only has one property with
// a certain name, Multiple should be true if a struct would best represent
// it as a field of type []T instead of type T.
Multiple bool
}
// An Entity is the value type for a nested struct.
// This type is only used for a Property's Value.
type Entity struct {
Key *Key
Properties []Property
}
// ByteString is a short byte slice (up to 1500 bytes) that can be indexed.
type ByteString []byte
// PropertyLoadSaver can be converted from and to a slice of Properties.
type PropertyLoadSaver interface {
Load([]Property) error
Save() ([]Property, error)
}
// PropertyList converts a []Property to implement PropertyLoadSaver.
type PropertyList []Property
var (
typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
typeOfPropertyList = reflect.TypeOf(PropertyList(nil))
)
// Load loads all of the provided properties into l.
// It does not first reset *l to an empty slice.
func (l *PropertyList) Load(p []Property) error {
*l = append(*l, p...)
return nil
}
// Save saves all of l's properties as a slice of Properties.
func (l *PropertyList) Save() ([]Property, error) {
return *l, nil
}
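// Illustrative sketch (not part of the vendored file): a minimal custom
// PropertyLoadSaver. The exampleNote type and its derived "Length" property
// are assumptions made up for the example; the point is only the Load/Save
// shape that the interface above requires.
type exampleNote struct {
	Text string
}

// Load keeps the stored text and ignores any other properties (such as the
// derived "Length" written by Save).
func (n *exampleNote) Load(props []Property) error {
	for _, p := range props {
		if p.Name == "Text" {
			if s, ok := p.Value.(string); ok {
				n.Text = s
			}
		}
	}
	return nil
}

// Save stores the text unindexed plus a derived, indexed length.
func (n *exampleNote) Save() ([]Property, error) {
	return []Property{
		{Name: "Text", Value: n.Text, NoIndex: true},
		{Name: "Length", Value: int64(len(n.Text))},
	}, nil
}

var _ PropertyLoadSaver = (*exampleNote)(nil)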
// validPropertyName returns whether name consists of one or more valid Go
// identifiers joined by ".".
func validPropertyName(name string) bool {
if name == "" {
return false
}
for _, s := range strings.Split(name, ".") {
if s == "" {
return false
}
first := true
for _, c := range s {
if first {
first = false
if c != '_' && !unicode.IsLetter(c) {
return false
}
} else {
if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
}
}
}
return true
}
// structCodec describes how to convert a struct to and from a sequence of
// properties.
type structCodec struct {
// fields gives the field codec for the structTag with the given name.
fields map[string]fieldCodec
// hasSlice is whether a struct or any of its nested or embedded structs
// has a slice-typed field (other than []byte).
hasSlice bool
// keyField is the index of a *Key field with structTag __key__.
// This field is not relevant for the top level struct, only for
// nested structs.
keyField int
// complete is whether the structCodec is complete. An incomplete
// structCodec may be encountered when walking a recursive struct.
complete bool
}
// fieldCodec is a struct field's index and, if that struct field's type is
// itself a struct, that substruct's structCodec.
type fieldCodec struct {
// path is the index path to the field
path []int
noIndex bool
// omitEmpty indicates that the field should be omitted on save
// if empty.
omitEmpty bool
// structCodec is the codec for the struct field at index 'path',
// or nil if the field is not a struct.
structCodec *structCodec
}
// structCodecs collects the structCodecs that have already been calculated.
var (
structCodecsMutex sync.Mutex
structCodecs = make(map[reflect.Type]*structCodec)
)
// getStructCodec returns the structCodec for the given struct type.
func getStructCodec(t reflect.Type) (*structCodec, error) {
structCodecsMutex.Lock()
defer structCodecsMutex.Unlock()
return getStructCodecLocked(t)
}
// getStructCodecLocked implements getStructCodec. The structCodecsMutex must
// be held when calling this function.
func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) {
c, ok := structCodecs[t]
if ok {
return c, nil
}
c = &structCodec{
fields: make(map[string]fieldCodec),
// We initialize keyField to -1 so that the zero-value is not
// misinterpreted as index 0.
keyField: -1,
}
// Add c to the structCodecs map before we are sure it is good. If t is
// a recursive type, it needs to find the incomplete entry for itself in
// the map.
structCodecs[t] = c
defer func() {
if retErr != nil {
delete(structCodecs, t)
}
}()
for i := 0; i < t.NumField(); i++ {
f := t.Field(i)
// Skip unexported fields.
// Note that if f is an anonymous, unexported struct field,
// we will promote its fields.
if f.PkgPath != "" && !f.Anonymous {
continue
}
tags := strings.Split(f.Tag.Get("datastore"), ",")
name := tags[0]
opts := make(map[string]bool)
for _, t := range tags[1:] {
opts[t] = true
}
switch {
case name == "":
if !f.Anonymous {
name = f.Name
}
case name == "-":
continue
case name == "__key__":
if f.Type != typeOfKeyPtr {
return nil, fmt.Errorf("datastore: __key__ field on struct %v is not a *datastore.Key", t)
}
c.keyField = i
case !validPropertyName(name):
return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name)
}
substructType, fIsSlice := reflect.Type(nil), false
switch f.Type.Kind() {
case reflect.Struct:
substructType = f.Type
case reflect.Slice:
if f.Type.Elem().Kind() == reflect.Struct {
substructType = f.Type.Elem()
}
fIsSlice = f.Type != typeOfByteSlice
c.hasSlice = c.hasSlice || fIsSlice
}
var sub *structCodec
if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint {
var err error
sub, err = getStructCodecLocked(substructType)
if err != nil {
return nil, err
}
if !sub.complete {
return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name)
}
if fIsSlice && sub.hasSlice {
return nil, fmt.Errorf(
"datastore: flattening nested structs leads to a slice of slices: field %q", f.Name)
}
c.hasSlice = c.hasSlice || sub.hasSlice
// If f is an anonymous struct field, we promote the substruct's fields up to this level
// in the linked list of struct codecs.
if f.Anonymous {
for subname, subfield := range sub.fields {
if name != "" {
subname = name + "." + subname
}
if _, ok := c.fields[subname]; ok {
return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", subname)
}
c.fields[subname] = fieldCodec{
path: append([]int{i}, subfield.path...),
noIndex: subfield.noIndex || opts["noindex"],
omitEmpty: subfield.omitEmpty,
structCodec: subfield.structCodec,
}
}
continue
}
}
if _, ok := c.fields[name]; ok {
return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name)
}
c.fields[name] = fieldCodec{
path: []int{i},
noIndex: opts["noindex"],
omitEmpty: opts["omitempty"],
structCodec: sub,
}
}
c.complete = true
return c, nil
}
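// Illustrative sketch (not part of the vendored file): how the "datastore"
// struct tags parsed above translate into codec entries. The types are made
// up for the example.
type exampleAddress struct {
	City string
	Zip  string `datastore:",noindex"` // stored as "Zip", not indexed
}

type examplePerson struct {
	Name     string         `datastore:"name"`       // renamed to "name"
	Age      int            `datastore:",omitempty"` // kept as "Age", omitted on save when zero
	Internal string         `datastore:"-"`          // skipped entirely
	Work     exampleAddress `datastore:"work"`       // nested struct: fieldCodec "work" with its own sub-codec
	exampleAddress                                   // embedded struct: fields promoted as "City" and "Zip"
}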
// structPLS adapts a struct to be a PropertyLoadSaver.
type structPLS struct {
v reflect.Value
codec *structCodec
}
// newStructPLS returns a structPLS, which implements the
// PropertyLoadSaver interface, for the struct pointer p.
func newStructPLS(p interface{}) (*structPLS, error) {
v := reflect.ValueOf(p)
if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
return nil, ErrInvalidEntityType
}
v = v.Elem()
codec, err := getStructCodec(v.Type())
if err != nil {
return nil, err
}
return &structPLS{v, codec}, nil
}
// LoadStruct loads the properties from p to dst.
// dst must be a struct pointer.
func LoadStruct(dst interface{}, p []Property) error {
x, err := newStructPLS(dst)
if err != nil {
return err
}
return x.Load(p)
}
// SaveStruct returns the properties from src as a slice of Properties.
// src must be a struct pointer.
func SaveStruct(src interface{}) ([]Property, error) {
x, err := newStructPLS(src)
if err != nil {
return nil, err
}
return x.Save()
}
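// Illustrative sketch (not part of the vendored file): round-tripping a value
// through SaveStruct and LoadStruct without touching the datastore service.
// The exampleGopher type is made up for the example.
type exampleGopher struct {
	Name   string
	Height int64
}

func exampleRoundTrip() (exampleGopher, error) {
	src := exampleGopher{Name: "George", Height: 32}
	props, err := SaveStruct(&src) // e.g. [{Name:"Name" Value:"George"} {Name:"Height" Value:int64(32)}]
	if err != nil {
		return exampleGopher{}, err
	}
	var dst exampleGopher
	err = LoadStruct(&dst, props)
	return dst, err
}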


@ -0,0 +1,672 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"reflect"
"sort"
"testing"
"time"
"google.golang.org/appengine"
)
func TestValidPropertyName(t *testing.T) {
testCases := []struct {
name string
want bool
}{
// Invalid names.
{"", false},
{"'", false},
{".", false},
{"..", false},
{".foo", false},
{"0", false},
{"00", false},
{"X.X.4.X.X", false},
{"\n", false},
{"\x00", false},
{"abc\xffz", false},
{"foo.", false},
{"foo..", false},
{"foo..bar", false},
{"☃", false},
{`"`, false},
// Valid names.
{"AB", true},
{"Abc", true},
{"X.X.X.X.X", true},
{"_", true},
{"_0", true},
{"a", true},
{"a_B", true},
{"f00", true},
{"f0o", true},
{"fo0", true},
{"foo", true},
{"foo.bar", true},
{"foo.bar.baz", true},
{"世界", true},
}
for _, tc := range testCases {
got := validPropertyName(tc.name)
if got != tc.want {
t.Errorf("%q: got %v, want %v", tc.name, got, tc.want)
}
}
}
func TestStructCodec(t *testing.T) {
type oStruct struct {
O int
}
type pStruct struct {
P int
Q int
}
type rStruct struct {
R int
S pStruct
T oStruct
oStruct
}
type uStruct struct {
U int
v int
}
type vStruct struct {
V string `datastore:",noindex"`
}
oStructCodec := &structCodec{
fields: map[string]fieldCodec{
"O": {path: []int{0}},
},
complete: true,
}
pStructCodec := &structCodec{
fields: map[string]fieldCodec{
"P": {path: []int{0}},
"Q": {path: []int{1}},
},
complete: true,
}
rStructCodec := &structCodec{
fields: map[string]fieldCodec{
"R": {path: []int{0}},
"S": {path: []int{1}, structCodec: pStructCodec},
"T": {path: []int{2}, structCodec: oStructCodec},
"O": {path: []int{3, 0}},
},
complete: true,
}
uStructCodec := &structCodec{
fields: map[string]fieldCodec{
"U": {path: []int{0}},
},
complete: true,
}
vStructCodec := &structCodec{
fields: map[string]fieldCodec{
"V": {path: []int{0}, noIndex: true},
},
complete: true,
}
testCases := []struct {
desc string
structValue interface{}
want *structCodec
}{
{
"oStruct",
oStruct{},
oStructCodec,
},
{
"pStruct",
pStruct{},
pStructCodec,
},
{
"rStruct",
rStruct{},
rStructCodec,
},
{
"uStruct",
uStruct{},
uStructCodec,
},
{
"non-basic fields",
struct {
B appengine.BlobKey
K *Key
T time.Time
}{},
&structCodec{
fields: map[string]fieldCodec{
"B": {path: []int{0}},
"K": {path: []int{1}},
"T": {path: []int{2}},
},
complete: true,
},
},
{
"struct tags with ignored embed",
struct {
A int `datastore:"a,noindex"`
B int `datastore:"b"`
C int `datastore:",noindex"`
D int `datastore:""`
E int
I int `datastore:"-"`
J int `datastore:",noindex" json:"j"`
oStruct `datastore:"-"`
}{},
&structCodec{
fields: map[string]fieldCodec{
"a": {path: []int{0}, noIndex: true},
"b": {path: []int{1}},
"C": {path: []int{2}, noIndex: true},
"D": {path: []int{3}},
"E": {path: []int{4}},
"J": {path: []int{6}, noIndex: true},
},
complete: true,
},
},
{
"unexported fields",
struct {
A int
b int
C int `datastore:"x"`
d int `datastore:"Y"`
}{},
&structCodec{
fields: map[string]fieldCodec{
"A": {path: []int{0}},
"x": {path: []int{2}},
},
complete: true,
},
},
{
"nested and embedded structs",
struct {
A int
B int
CC oStruct
DDD rStruct
oStruct
}{},
&structCodec{
fields: map[string]fieldCodec{
"A": {path: []int{0}},
"B": {path: []int{1}},
"CC": {path: []int{2}, structCodec: oStructCodec},
"DDD": {path: []int{3}, structCodec: rStructCodec},
"O": {path: []int{4, 0}},
},
complete: true,
},
},
{
"struct tags with nested and embedded structs",
struct {
A int `datastore:"-"`
B int `datastore:"w"`
C oStruct `datastore:"xx"`
D rStruct `datastore:"y"`
oStruct `datastore:"z"`
}{},
&structCodec{
fields: map[string]fieldCodec{
"w": {path: []int{1}},
"xx": {path: []int{2}, structCodec: oStructCodec},
"y": {path: []int{3}, structCodec: rStructCodec},
"z.O": {path: []int{4, 0}},
},
complete: true,
},
},
{
"unexported nested and embedded structs",
struct {
a int
B int
c uStruct
D uStruct
uStruct
}{},
&structCodec{
fields: map[string]fieldCodec{
"B": {path: []int{1}},
"D": {path: []int{3}, structCodec: uStructCodec},
"U": {path: []int{4, 0}},
},
complete: true,
},
},
{
"noindex nested struct",
struct {
A oStruct `datastore:",noindex"`
}{},
&structCodec{
fields: map[string]fieldCodec{
"A": {path: []int{0}, structCodec: oStructCodec, noIndex: true},
},
complete: true,
},
},
{
"noindex slice",
struct {
A []string `datastore:",noindex"`
}{},
&structCodec{
fields: map[string]fieldCodec{
"A": {path: []int{0}, noIndex: true},
},
hasSlice: true,
complete: true,
},
},
{
"noindex embedded struct slice",
struct {
// vStruct has a single field, V, also with noindex.
A []vStruct `datastore:",noindex"`
}{},
&structCodec{
fields: map[string]fieldCodec{
"A": {path: []int{0}, structCodec: vStructCodec, noIndex: true},
},
hasSlice: true,
complete: true,
},
},
}
for _, tc := range testCases {
got, err := getStructCodec(reflect.TypeOf(tc.structValue))
if err != nil {
t.Errorf("%s: getStructCodec: %v", tc.desc, err)
continue
}
// can't reflect.DeepEqual b/c element order in fields map may differ
if !isEqualStructCodec(got, tc.want) {
t.Errorf("%s\ngot %+v\nwant %+v\n", tc.desc, got, tc.want)
}
}
}
func isEqualStructCodec(got, want *structCodec) bool {
if got.complete != want.complete {
return false
}
if got.hasSlice != want.hasSlice {
return false
}
if len(got.fields) != len(want.fields) {
return false
}
for name, wantF := range want.fields {
gotF := got.fields[name]
if !reflect.DeepEqual(wantF.path, gotF.path) {
return false
}
if wantF.noIndex != gotF.noIndex {
return false
}
if wantF.structCodec != nil {
if gotF.structCodec == nil {
return false
}
if !isEqualStructCodec(gotF.structCodec, wantF.structCodec) {
return false
}
}
}
return true
}
func TestRepeatedPropertyName(t *testing.T) {
good := []interface{}{
struct {
A int `datastore:"-"`
}{},
struct {
A int `datastore:"b"`
B int
}{},
struct {
A int
B int `datastore:"B"`
}{},
struct {
A int `datastore:"B"`
B int `datastore:"-"`
}{},
struct {
A int `datastore:"-"`
B int `datastore:"A"`
}{},
struct {
A int `datastore:"B"`
B int `datastore:"A"`
}{},
struct {
A int `datastore:"B"`
B int `datastore:"C"`
C int `datastore:"A"`
}{},
struct {
A int `datastore:"B"`
B int `datastore:"C"`
C int `datastore:"D"`
}{},
}
bad := []interface{}{
struct {
A int `datastore:"B"`
B int
}{},
struct {
A int
B int `datastore:"A"`
}{},
struct {
A int `datastore:"C"`
B int `datastore:"C"`
}{},
struct {
A int `datastore:"B"`
B int `datastore:"C"`
C int `datastore:"B"`
}{},
}
testGetStructCodec(t, good, bad)
}
func TestFlatteningNestedStructs(t *testing.T) {
type DeepGood struct {
A struct {
B []struct {
C struct {
D int
}
}
}
}
type DeepBad struct {
A struct {
B []struct {
C struct {
D []int
}
}
}
}
type ISay struct {
Tomato int
}
type YouSay struct {
Tomato int
}
type Tweedledee struct {
Dee int `datastore:"D"`
}
type Tweedledum struct {
Dum int `datastore:"D"`
}
good := []interface{}{
struct {
X []struct {
Y string
}
}{},
struct {
X []struct {
Y []byte
}
}{},
struct {
P []int
X struct {
Y []int
}
}{},
struct {
X struct {
Y []int
}
Q []int
}{},
struct {
P []int
X struct {
Y []int
}
Q []int
}{},
struct {
DeepGood
}{},
struct {
DG DeepGood
}{},
struct {
Foo struct {
Z int
} `datastore:"A"`
Bar struct {
Z int
} `datastore:"B"`
}{},
}
bad := []interface{}{
struct {
X []struct {
Y []string
}
}{},
struct {
X []struct {
Y []int
}
}{},
struct {
DeepBad
}{},
struct {
DB DeepBad
}{},
struct {
ISay
YouSay
}{},
struct {
Tweedledee
Tweedledum
}{},
struct {
Foo struct {
Z int
} `datastore:"A"`
Bar struct {
Z int
} `datastore:"A"`
}{},
}
testGetStructCodec(t, good, bad)
}
func testGetStructCodec(t *testing.T, good []interface{}, bad []interface{}) {
for _, x := range good {
if _, err := getStructCodec(reflect.TypeOf(x)); err != nil {
t.Errorf("type %T: got non-nil error (%s), want nil", x, err)
}
}
for _, x := range bad {
if _, err := getStructCodec(reflect.TypeOf(x)); err == nil {
t.Errorf("type %T: got nil error, want non-nil", x)
}
}
}
func TestNilKeyIsStored(t *testing.T) {
x := struct {
K *Key
I int
}{}
p := PropertyList{}
// Save x as properties.
p1, _ := SaveStruct(&x)
p.Load(p1)
// Set x's fields to non-zero.
x.K = &Key{}
x.I = 2
// Load x from properties.
p2, _ := p.Save()
LoadStruct(&x, p2)
// Check that x's fields were set to zero.
if x.K != nil {
t.Errorf("K field was not zero")
}
if x.I != 0 {
t.Errorf("I field was not zero")
}
}
func TestSaveStructOmitEmpty(t *testing.T) {
// Expected property names are sorted alphabetically.
expectedPropNamesForSingles := []string{"EmptyValue", "NonEmptyValue", "OmitEmptyWithValue"}
expectedPropNamesForSlices := []string{"NonEmptyValue", "NonEmptyValue", "OmitEmptyWithValue", "OmitEmptyWithValue"}
testOmitted := func(expectedPropNames []string, src interface{}) {
// t.Helper() - this is available from Go version 1.9, but we also support Go versions 1.6, 1.7, 1.8
if props, err := SaveStruct(src); err != nil {
t.Fatal(err)
} else {
// Collect the actual property names so they can be sorted and compared against the expected names.
actualPropNames := make([]string, len(props))
for i := range props {
actualPropNames[i] = props[i].Name
}
// Sort actuals for comparing with already sorted expected names
sort.Sort(sort.StringSlice(actualPropNames))
if !reflect.DeepEqual(actualPropNames, expectedPropNames) {
t.Errorf("Expected this properties: %v, got: %v", expectedPropNames, actualPropNames)
}
}
}
testOmitted(expectedPropNamesForSingles, &struct {
EmptyValue int
NonEmptyValue int
OmitEmptyNoValue int `datastore:",omitempty"`
OmitEmptyWithValue int `datastore:",omitempty"`
}{
NonEmptyValue: 1,
OmitEmptyWithValue: 2,
})
testOmitted(expectedPropNamesForSlices, &struct {
EmptyValue []int
NonEmptyValue []int
OmitEmptyNoValue []int `datastore:",omitempty"`
OmitEmptyWithValue []int `datastore:",omitempty"`
}{
NonEmptyValue: []int{1, 2},
OmitEmptyWithValue: []int{3, 4},
})
testOmitted(expectedPropNamesForSingles, &struct {
EmptyValue bool
NonEmptyValue bool
OmitEmptyNoValue bool `datastore:",omitempty"`
OmitEmptyWithValue bool `datastore:",omitempty"`
}{
NonEmptyValue: true,
OmitEmptyWithValue: true,
})
testOmitted(expectedPropNamesForSlices, &struct {
EmptyValue []bool
NonEmptyValue []bool
OmitEmptyNoValue []bool `datastore:",omitempty"`
OmitEmptyWithValue []bool `datastore:",omitempty"`
}{
NonEmptyValue: []bool{true, true},
OmitEmptyWithValue: []bool{true, true},
})
testOmitted(expectedPropNamesForSingles, &struct {
EmptyValue string
NonEmptyValue string
OmitEmptyNoValue string `datastore:",omitempty"`
OmitEmptyWithValue string `datastore:",omitempty"`
}{
NonEmptyValue: "s",
OmitEmptyWithValue: "s",
})
testOmitted(expectedPropNamesForSlices, &struct {
EmptyValue []string
NonEmptyValue []string
OmitEmptyNoValue []string `datastore:",omitempty"`
OmitEmptyWithValue []string `datastore:",omitempty"`
}{
NonEmptyValue: []string{"s1", "s2"},
OmitEmptyWithValue: []string{"s3", "s4"},
})
testOmitted(expectedPropNamesForSingles, &struct {
EmptyValue float32
NonEmptyValue float32
OmitEmptyNoValue float32 `datastore:",omitempty"`
OmitEmptyWithValue float32 `datastore:",omitempty"`
}{
NonEmptyValue: 1.1,
OmitEmptyWithValue: 1.2,
})
testOmitted(expectedPropNamesForSlices, &struct {
EmptyValue []float32
NonEmptyValue []float32
OmitEmptyNoValue []float32 `datastore:",omitempty"`
OmitEmptyWithValue []float32 `datastore:",omitempty"`
}{
NonEmptyValue: []float32{1.1, 2.2},
OmitEmptyWithValue: []float32{3.3, 4.4},
})
testOmitted(expectedPropNamesForSingles, &struct {
EmptyValue time.Time
NonEmptyValue time.Time
OmitEmptyNoValue time.Time `datastore:",omitempty"`
OmitEmptyWithValue time.Time `datastore:",omitempty"`
}{
NonEmptyValue: now,
OmitEmptyWithValue: now,
})
testOmitted(expectedPropNamesForSlices, &struct {
EmptyValue []time.Time
NonEmptyValue []time.Time
OmitEmptyNoValue []time.Time `datastore:",omitempty"`
OmitEmptyWithValue []time.Time `datastore:",omitempty"`
}{
NonEmptyValue: []time.Time{now, now},
OmitEmptyWithValue: []time.Time{now, now},
})
}
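// Illustrative sketch (not part of the vendored file): the omitempty behaviour
// exercised by the test above, seen from calling code. The exampleProfile type
// is made up for the example.
type exampleProfile struct {
	Name string
	Bio  string `datastore:",omitempty"`
}

func exampleOmitEmpty() (int, error) {
	// Bio is the zero string, so SaveStruct drops it and only "Name" remains.
	props, err := SaveStruct(&exampleProfile{Name: "gopher"})
	if err != nil {
		return 0, err
	}
	return len(props), nil // expected: 1
}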

vendor/google.golang.org/appengine/datastore/query.go generated vendored Normal file

@ -0,0 +1,757 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"encoding/base64"
"errors"
"fmt"
"math"
"reflect"
"strings"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/appengine/internal"
pb "google.golang.org/appengine/internal/datastore"
)
type operator int
const (
lessThan operator = iota
lessEq
equal
greaterEq
greaterThan
)
var operatorToProto = map[operator]*pb.Query_Filter_Operator{
lessThan: pb.Query_Filter_LESS_THAN.Enum(),
lessEq: pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(),
equal: pb.Query_Filter_EQUAL.Enum(),
greaterEq: pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(),
greaterThan: pb.Query_Filter_GREATER_THAN.Enum(),
}
// filter is a conditional filter on query results.
type filter struct {
FieldName string
Op operator
Value interface{}
}
type sortDirection int
const (
ascending sortDirection = iota
descending
)
var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{
ascending: pb.Query_Order_ASCENDING.Enum(),
descending: pb.Query_Order_DESCENDING.Enum(),
}
// order is a sort order on query results.
type order struct {
FieldName string
Direction sortDirection
}
// NewQuery creates a new Query for a specific entity kind.
//
// An empty kind means to return all entities, including entities created and
// managed by other App Engine features, and is called a kindless query.
// Kindless queries cannot include filters or sort orders on property values.
func NewQuery(kind string) *Query {
return &Query{
kind: kind,
limit: -1,
}
}
// Query represents a datastore query.
type Query struct {
kind string
ancestor *Key
filter []filter
order []order
projection []string
distinct bool
keysOnly bool
eventual bool
limit int32
offset int32
count int32
start *pb.CompiledCursor
end *pb.CompiledCursor
err error
}
func (q *Query) clone() *Query {
x := *q
// Copy the contents of the slice-typed fields to a new backing store.
if len(q.filter) > 0 {
x.filter = make([]filter, len(q.filter))
copy(x.filter, q.filter)
}
if len(q.order) > 0 {
x.order = make([]order, len(q.order))
copy(x.order, q.order)
}
return &x
}
// Ancestor returns a derivative query with an ancestor filter.
// The ancestor should not be nil.
func (q *Query) Ancestor(ancestor *Key) *Query {
q = q.clone()
if ancestor == nil {
q.err = errors.New("datastore: nil query ancestor")
return q
}
q.ancestor = ancestor
return q
}
// EventualConsistency returns a derivative query that returns eventually
// consistent results.
// It only has an effect on ancestor queries.
func (q *Query) EventualConsistency() *Query {
q = q.clone()
q.eventual = true
return q
}
// Filter returns a derivative query with a field-based filter.
// The filterStr argument must be a field name followed by optional space,
// followed by an operator, one of ">", "<", ">=", "<=", or "=".
// Fields are compared against the provided value using the operator.
// Multiple filters are AND'ed together.
func (q *Query) Filter(filterStr string, value interface{}) *Query {
q = q.clone()
filterStr = strings.TrimSpace(filterStr)
if len(filterStr) < 1 {
q.err = errors.New("datastore: invalid filter: " + filterStr)
return q
}
f := filter{
FieldName: strings.TrimRight(filterStr, " ><=!"),
Value: value,
}
switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {
case "<=":
f.Op = lessEq
case ">=":
f.Op = greaterEq
case "<":
f.Op = lessThan
case ">":
f.Op = greaterThan
case "=":
f.Op = equal
default:
q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr)
return q
}
q.filter = append(q.filter, f)
return q
}
// Order returns a derivative query with a field-based sort order. Orders are
// applied in the order they are added. The default order is ascending; to sort
// in descending order prefix the fieldName with a minus sign (-).
func (q *Query) Order(fieldName string) *Query {
q = q.clone()
fieldName = strings.TrimSpace(fieldName)
o := order{
Direction: ascending,
FieldName: fieldName,
}
if strings.HasPrefix(fieldName, "-") {
o.Direction = descending
o.FieldName = strings.TrimSpace(fieldName[1:])
} else if strings.HasPrefix(fieldName, "+") {
q.err = fmt.Errorf("datastore: invalid order: %q", fieldName)
return q
}
if len(o.FieldName) == 0 {
q.err = errors.New("datastore: empty order")
return q
}
q.order = append(q.order, o)
return q
}
// Project returns a derivative query that yields only the given fields. It
// cannot be used with KeysOnly.
func (q *Query) Project(fieldNames ...string) *Query {
q = q.clone()
q.projection = append([]string(nil), fieldNames...)
return q
}
// Distinct returns a derivative query that yields de-duplicated entities with
// respect to the set of projected fields. It is only used for projection
// queries.
func (q *Query) Distinct() *Query {
q = q.clone()
q.distinct = true
return q
}
// KeysOnly returns a derivative query that yields only keys, not keys and
// entities. It cannot be used with projection queries.
func (q *Query) KeysOnly() *Query {
q = q.clone()
q.keysOnly = true
return q
}
// Limit returns a derivative query that has a limit on the number of results
// returned. A negative value means unlimited.
func (q *Query) Limit(limit int) *Query {
q = q.clone()
if limit < math.MinInt32 || limit > math.MaxInt32 {
q.err = errors.New("datastore: query limit overflow")
return q
}
q.limit = int32(limit)
return q
}
// Offset returns a derivative query that has an offset of how many keys to
// skip over before returning results. A negative value is invalid.
func (q *Query) Offset(offset int) *Query {
q = q.clone()
if offset < 0 {
q.err = errors.New("datastore: negative query offset")
return q
}
if offset > math.MaxInt32 {
q.err = errors.New("datastore: query offset overflow")
return q
}
q.offset = int32(offset)
return q
}
// BatchSize returns a derivative query to fetch the supplied number of results
// at once. This value should be greater than zero, and equal to or less than
// the Limit.
func (q *Query) BatchSize(size int) *Query {
q = q.clone()
if size <= 0 || size > math.MaxInt32 {
q.err = errors.New("datastore: query batch size overflow")
return q
}
q.count = int32(size)
return q
}
// Start returns a derivative query with the given start point.
func (q *Query) Start(c Cursor) *Query {
q = q.clone()
if c.cc == nil {
q.err = errors.New("datastore: invalid cursor")
return q
}
q.start = c.cc
return q
}
// End returns a derivative query with the given end point.
func (q *Query) End(c Cursor) *Query {
q = q.clone()
if c.cc == nil {
q.err = errors.New("datastore: invalid cursor")
return q
}
q.end = c.cc
return q
}
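// Illustrative sketch (not part of the vendored file): composing a query with
// the derivative methods above. The kind, field name and numbers are made up.
// Every method returns a copy, so the original query is never mutated, and an
// invalid argument is recorded in q.err and only surfaces later from Run,
// GetAll or Count.
func exampleBuildQuery() *Query {
	return NewQuery("Gopher").
		Filter("Height >=", 30).
		Order("-Height").
		Limit(20).
		BatchSize(20)
}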
// toProto converts the query to a protocol buffer.
func (q *Query) toProto(dst *pb.Query, appID string) error {
if len(q.projection) != 0 && q.keysOnly {
return errors.New("datastore: query cannot both project and be keys-only")
}
dst.Reset()
dst.App = proto.String(appID)
if q.kind != "" {
dst.Kind = proto.String(q.kind)
}
if q.ancestor != nil {
dst.Ancestor = keyToProto(appID, q.ancestor)
if q.eventual {
dst.Strong = proto.Bool(false)
}
}
if q.projection != nil {
dst.PropertyName = q.projection
if q.distinct {
dst.GroupByPropertyName = q.projection
}
}
if q.keysOnly {
dst.KeysOnly = proto.Bool(true)
dst.RequirePerfectPlan = proto.Bool(true)
}
for _, qf := range q.filter {
if qf.FieldName == "" {
return errors.New("datastore: empty query filter field name")
}
p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false)
if errStr != "" {
return errors.New("datastore: bad query filter value type: " + errStr)
}
xf := &pb.Query_Filter{
Op: operatorToProto[qf.Op],
Property: []*pb.Property{p},
}
if xf.Op == nil {
return errors.New("datastore: unknown query filter operator")
}
dst.Filter = append(dst.Filter, xf)
}
for _, qo := range q.order {
if qo.FieldName == "" {
return errors.New("datastore: empty query order field name")
}
xo := &pb.Query_Order{
Property: proto.String(qo.FieldName),
Direction: sortDirectionToProto[qo.Direction],
}
if xo.Direction == nil {
return errors.New("datastore: unknown query order direction")
}
dst.Order = append(dst.Order, xo)
}
if q.limit >= 0 {
dst.Limit = proto.Int32(q.limit)
}
if q.offset != 0 {
dst.Offset = proto.Int32(q.offset)
}
if q.count != 0 {
dst.Count = proto.Int32(q.count)
}
dst.CompiledCursor = q.start
dst.EndCompiledCursor = q.end
dst.Compile = proto.Bool(true)
return nil
}
// Count returns the number of results for the query.
//
// The running time and number of API calls made by Count scale linearly with
// the sum of the query's offset and limit. Unless the result count is
// expected to be small, it is best to specify a limit; otherwise Count will
// continue until it finishes counting or the provided context expires.
func (q *Query) Count(c context.Context) (int, error) {
// Check that the query is well-formed.
if q.err != nil {
return 0, q.err
}
// Run a copy of the query, with keysOnly true (if we're not a projection,
// since the two are incompatible), and an adjusted offset. We also set the
// limit to zero, as we don't want any actual entity data, just the number
// of skipped results.
newQ := q.clone()
newQ.keysOnly = len(newQ.projection) == 0
newQ.limit = 0
if q.limit < 0 {
// If the original query was unlimited, set the new query's offset to maximum.
newQ.offset = math.MaxInt32
} else {
newQ.offset = q.offset + q.limit
if newQ.offset < 0 {
// Do the best we can, in the presence of overflow.
newQ.offset = math.MaxInt32
}
}
req := &pb.Query{}
if err := newQ.toProto(req, internal.FullyQualifiedAppID(c)); err != nil {
return 0, err
}
res := &pb.QueryResult{}
if err := internal.Call(c, "datastore_v3", "RunQuery", req, res); err != nil {
return 0, err
}
// n is the count we will return. For example, suppose that our original
// query had an offset of 4 and a limit of 2008: the count will be 2008,
// provided that there are at least 2012 matching entities. However, the
// RPCs will only skip 1000 results at a time. The RPC sequence is:
// call RunQuery with (offset, limit) = (2012, 0) // 2012 == newQ.offset
// response has (skippedResults, moreResults) = (1000, true)
// n += 1000 // n == 1000
// call Next with (offset, limit) = (1012, 0) // 1012 == newQ.offset - n
// response has (skippedResults, moreResults) = (1000, true)
// n += 1000 // n == 2000
// call Next with (offset, limit) = (12, 0) // 12 == newQ.offset - n
// response has (skippedResults, moreResults) = (12, false)
// n += 12 // n == 2012
// // exit the loop
// n -= 4 // n == 2008
var n int32
for {
// The QueryResult should have no actual entity data, just skipped results.
if len(res.Result) != 0 {
return 0, errors.New("datastore: internal error: Count request returned too much data")
}
n += res.GetSkippedResults()
if !res.GetMoreResults() {
break
}
if err := callNext(c, res, newQ.offset-n, q.count); err != nil {
return 0, err
}
}
n -= q.offset
if n < 0 {
// If the offset was greater than the number of matching entities,
// return 0 instead of negative.
n = 0
}
return int(n), nil
}
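// Illustrative sketch (not part of the vendored file): bounding the work done
// by Count with an explicit limit, as the comment above recommends. The kind,
// filter and limit are made up.
func exampleCount(ctx context.Context) (int, error) {
	// Without Limit, Count keeps issuing RPCs until every match has been
	// skipped; with Limit(1000) both the result and the RPC count are capped.
	return NewQuery("Gopher").Filter("Height >", 30).Limit(1000).Count(ctx)
}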
// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that
// returned by a query with more results.
func callNext(c context.Context, res *pb.QueryResult, offset, count int32) error {
if res.Cursor == nil {
return errors.New("datastore: internal error: server did not return a cursor")
}
req := &pb.NextRequest{
Cursor: res.Cursor,
}
if count >= 0 {
req.Count = proto.Int32(count)
}
if offset != 0 {
req.Offset = proto.Int32(offset)
}
if res.CompiledCursor != nil {
req.Compile = proto.Bool(true)
}
res.Reset()
return internal.Call(c, "datastore_v3", "Next", req, res)
}
// GetAll runs the query in the given context and returns all keys that match
// that query, as well as appending the values to dst.
//
// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
//
// As a special case, *PropertyList is an invalid type for dst, even though a
// PropertyList is a slice of structs. It is treated as invalid to avoid being
// mistakenly passed when *[]PropertyList was intended.
//
// The keys returned by GetAll will be in a 1-1 correspondence with the entities
// added to dst.
//
// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
//
// The running time and number of API calls made by GetAll scale linearly with
// the sum of the query's offset and limit. Unless the result count is
// expected to be small, it is best to specify a limit; otherwise GetAll will
// continue until it finishes collecting results or the provided context
// expires.
func (q *Query) GetAll(c context.Context, dst interface{}) ([]*Key, error) {
var (
dv reflect.Value
mat multiArgType
elemType reflect.Type
errFieldMismatch error
)
if !q.keysOnly {
dv = reflect.ValueOf(dst)
if dv.Kind() != reflect.Ptr || dv.IsNil() {
return nil, ErrInvalidEntityType
}
dv = dv.Elem()
mat, elemType = checkMultiArg(dv)
if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
return nil, ErrInvalidEntityType
}
}
var keys []*Key
for t := q.Run(c); ; {
k, e, err := t.next()
if err == Done {
break
}
if err != nil {
return keys, err
}
if !q.keysOnly {
ev := reflect.New(elemType)
if elemType.Kind() == reflect.Map {
// This is a special case. The zero values of a map type are
// not immediately useful; they have to be make'd.
//
// Funcs and channels are similar, in that a zero value is not useful,
// but even a freshly make'd channel isn't useful: there's no fixed
// channel buffer size that is always going to be large enough, and
// there's no goroutine to drain the other end. Theoretically, these
// types could be supported, for example by sniffing for a constructor
// method or requiring prior registration, but for now it's not a
// frequent enough concern to be worth it. Programmers can work around
// it by explicitly using Iterator.Next instead of the Query.GetAll
// convenience method.
x := reflect.MakeMap(elemType)
ev.Elem().Set(x)
}
if err = loadEntity(ev.Interface(), e); err != nil {
if _, ok := err.(*ErrFieldMismatch); ok {
// We continue loading entities even in the face of field mismatch errors.
// If we encounter any other error, that other error is returned. Otherwise,
// an ErrFieldMismatch is returned.
errFieldMismatch = err
} else {
return keys, err
}
}
if mat != multiArgTypeStructPtr {
ev = ev.Elem()
}
dv.Set(reflect.Append(dv, ev))
}
keys = append(keys, k)
}
return keys, errFieldMismatch
}
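// Illustrative sketch (not part of the vendored file): collecting all matches
// with GetAll. The gopher type mirrors the shape used in this package's tests
// and is an assumption of the example.
func exampleGetAll(ctx context.Context) ([]*Key, error) {
	type gopher struct {
		Name   string
		Height int64
	}
	var gophers []gopher
	keys, err := NewQuery("Gopher").Filter("Height >", 30).Limit(50).GetAll(ctx, &gophers)
	if _, ok := err.(*ErrFieldMismatch); err != nil && !ok {
		// Field mismatches still come with usable keys and entities; anything
		// else is treated as fatal here.
		return nil, err
	}
	// keys[i] is the key of gophers[i].
	return keys, nil
}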
// Run runs the query in the given context.
func (q *Query) Run(c context.Context) *Iterator {
if q.err != nil {
return &Iterator{err: q.err}
}
t := &Iterator{
c: c,
limit: q.limit,
count: q.count,
q: q,
prevCC: q.start,
}
var req pb.Query
if err := q.toProto(&req, internal.FullyQualifiedAppID(c)); err != nil {
t.err = err
return t
}
if err := internal.Call(c, "datastore_v3", "RunQuery", &req, &t.res); err != nil {
t.err = err
return t
}
offset := q.offset - t.res.GetSkippedResults()
var count int32
if t.count > 0 && (t.limit < 0 || t.count < t.limit) {
count = t.count
} else {
count = t.limit
}
for offset > 0 && t.res.GetMoreResults() {
t.prevCC = t.res.CompiledCursor
if err := callNext(t.c, &t.res, offset, count); err != nil {
t.err = err
break
}
skip := t.res.GetSkippedResults()
if skip < 0 {
t.err = errors.New("datastore: internal error: negative number of skipped_results")
break
}
offset -= skip
}
if offset < 0 {
t.err = errors.New("datastore: internal error: query offset was overshot")
}
return t
}
// Iterator is the result of running a query.
type Iterator struct {
c context.Context
err error
// res is the result of the most recent RunQuery or Next API call.
res pb.QueryResult
// i is how many elements of res.Result we have iterated over.
i int
// limit is the limit on the number of results this iterator should return.
// A negative value means unlimited.
limit int32
// count is the number of results this iterator should fetch at once. This
// should be equal to or greater than zero.
count int32
// q is the original query which yielded this iterator.
q *Query
// prevCC is the compiled cursor that marks the end of the previous batch
// of results.
prevCC *pb.CompiledCursor
}
// Done is returned when a query iteration has completed.
var Done = errors.New("datastore: query has no more results")
// Next returns the key of the next result. When there are no more results,
// Done is returned as the error.
//
// If the query is not keys only and dst is non-nil, it also loads the entity
// stored for that key into the struct pointer or PropertyLoadSaver dst, with
// the same semantics and possible errors as for the Get function.
func (t *Iterator) Next(dst interface{}) (*Key, error) {
k, e, err := t.next()
if err != nil {
return nil, err
}
if dst != nil && !t.q.keysOnly {
err = loadEntity(dst, e)
}
return k, err
}
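// Illustrative sketch (not part of the vendored file): the usual iteration
// loop over Run and Next, stopping on Done. The kind and struct shape are
// made up.
func exampleIterate(ctx context.Context) error {
	q := NewQuery("Gopher").Order("Name")
	for t := q.Run(ctx); ; {
		var g struct {
			Name   string
			Height int64
		}
		key, err := t.Next(&g)
		if err == Done {
			return nil
		}
		if err != nil {
			return err
		}
		_ = key // process key and g here
	}
}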
func (t *Iterator) next() (*Key, *pb.EntityProto, error) {
if t.err != nil {
return nil, nil, t.err
}
// Issue datastore_v3/Next RPCs as necessary.
for t.i == len(t.res.Result) {
if !t.res.GetMoreResults() {
t.err = Done
return nil, nil, t.err
}
t.prevCC = t.res.CompiledCursor
var count int32
if t.count > 0 && (t.limit < 0 || t.count < t.limit) {
count = t.count
} else {
count = t.limit
}
if err := callNext(t.c, &t.res, 0, count); err != nil {
t.err = err
return nil, nil, t.err
}
if t.res.GetSkippedResults() != 0 {
t.err = errors.New("datastore: internal error: iterator has skipped results")
return nil, nil, t.err
}
t.i = 0
if t.limit >= 0 {
t.limit -= int32(len(t.res.Result))
if t.limit < 0 {
t.err = errors.New("datastore: internal error: query returned more results than the limit")
return nil, nil, t.err
}
}
}
// Extract the key from the t.i'th element of t.res.Result.
e := t.res.Result[t.i]
t.i++
if e.Key == nil {
return nil, nil, errors.New("datastore: internal error: server did not return a key")
}
k, err := protoToKey(e.Key)
if err != nil || k.Incomplete() {
return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
}
return k, e, nil
}
// Cursor returns a cursor for the iterator's current location.
func (t *Iterator) Cursor() (Cursor, error) {
if t.err != nil && t.err != Done {
return Cursor{}, t.err
}
// If we are at either end of the current batch of results,
// return the compiled cursor at that end.
skipped := t.res.GetSkippedResults()
if t.i == 0 && skipped == 0 {
if t.prevCC == nil {
// A nil pointer (of type *pb.CompiledCursor) means no constraint:
// passing it as the end cursor of a new query means unlimited results
// (glossing over the integer limit parameter for now).
// A non-nil pointer to an empty pb.CompiledCursor means the start:
// passing it as the end cursor of a new query means 0 results.
// If prevCC was nil, then the original query had no start cursor, but
// Iterator.Cursor should return "the start" instead of unlimited.
return Cursor{&zeroCC}, nil
}
return Cursor{t.prevCC}, nil
}
if t.i == len(t.res.Result) {
return Cursor{t.res.CompiledCursor}, nil
}
// Otherwise, re-run the query offset to this iterator's position, starting from
// the most recent compiled cursor. This is done on a best-effort basis, as it
// is racy; if a concurrent process has added or removed entities, then the
// cursor returned may be inconsistent.
q := t.q.clone()
q.start = t.prevCC
q.offset = skipped + int32(t.i)
q.limit = 0
q.keysOnly = len(q.projection) == 0
t1 := q.Run(t.c)
_, _, err := t1.next()
if err != Done {
if err == nil {
err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results")
}
return Cursor{}, err
}
return Cursor{t1.res.CompiledCursor}, nil
}
var zeroCC pb.CompiledCursor
// Cursor is an iterator's position. It can be converted to and from an opaque
// string. A cursor can be used from different HTTP requests, but only with a
// query with the same kind, ancestor, filter and order constraints.
type Cursor struct {
cc *pb.CompiledCursor
}
// String returns a base-64 string representation of a cursor.
func (c Cursor) String() string {
if c.cc == nil {
return ""
}
b, err := proto.Marshal(c.cc)
if err != nil {
// The only way to construct a Cursor with a non-nil cc field is to
// unmarshal from the byte representation. We panic if the unmarshal
// succeeds but the marshaling of the unchanged protobuf value fails.
panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err))
}
return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
}
// DecodeCursor decodes a cursor from its base-64 string representation.
func DecodeCursor(s string) (Cursor, error) {
if s == "" {
return Cursor{&zeroCC}, nil
}
if n := len(s) % 4; n != 0 {
s += strings.Repeat("=", 4-n)
}
b, err := base64.URLEncoding.DecodeString(s)
if err != nil {
return Cursor{}, err
}
cc := &pb.CompiledCursor{}
if err := proto.Unmarshal(b, cc); err != nil {
return Cursor{}, err
}
return Cursor{cc}, nil
}
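// Illustrative sketch (not part of the vendored file): simple cursor-based
// pagination. The page size and kind are made up; the cursor string would
// normally round-trip through a URL parameter or similar.
func exampleNextPage(ctx context.Context, cursorStr string) (string, error) {
	q := NewQuery("Gopher").Limit(10)
	if cursorStr != "" {
		c, err := DecodeCursor(cursorStr)
		if err != nil {
			return "", err
		}
		q = q.Start(c)
	}
	t := q.Run(ctx)
	for {
		var g struct{ Name string }
		_, err := t.Next(&g)
		if err == Done {
			break
		}
		if err != nil {
			return "", err
		}
	}
	// Cursor marks the position just after the last result returned above;
	// its String form can be handed back to DecodeCursor on the next request.
	next, err := t.Cursor()
	if err != nil {
		return "", err
	}
	return next.String(), nil
}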


@ -0,0 +1,584 @@
// Copyright 2011 Google Inc. All Rights Reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"errors"
"fmt"
"reflect"
"strings"
"testing"
"github.com/golang/protobuf/proto"
"google.golang.org/appengine/internal"
"google.golang.org/appengine/internal/aetesting"
pb "google.golang.org/appengine/internal/datastore"
)
var (
path1 = &pb.Path{
Element: []*pb.Path_Element{
{
Type: proto.String("Gopher"),
Id: proto.Int64(6),
},
},
}
path2 = &pb.Path{
Element: []*pb.Path_Element{
{
Type: proto.String("Gopher"),
Id: proto.Int64(6),
},
{
Type: proto.String("Gopher"),
Id: proto.Int64(8),
},
},
}
)
func fakeRunQuery(in *pb.Query, out *pb.QueryResult) error {
expectedIn := &pb.Query{
App: proto.String("dev~fake-app"),
Kind: proto.String("Gopher"),
Compile: proto.Bool(true),
}
if !proto.Equal(in, expectedIn) {
return fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn)
}
*out = pb.QueryResult{
Result: []*pb.EntityProto{
{
Key: &pb.Reference{
App: proto.String("s~test-app"),
Path: path1,
},
EntityGroup: path1,
Property: []*pb.Property{
{
Meaning: pb.Property_TEXT.Enum(),
Name: proto.String("Name"),
Value: &pb.PropertyValue{
StringValue: proto.String("George"),
},
},
{
Name: proto.String("Height"),
Value: &pb.PropertyValue{
Int64Value: proto.Int64(32),
},
},
},
},
{
Key: &pb.Reference{
App: proto.String("s~test-app"),
Path: path2,
},
EntityGroup: path1, // ancestor is George
Property: []*pb.Property{
{
Meaning: pb.Property_TEXT.Enum(),
Name: proto.String("Name"),
Value: &pb.PropertyValue{
StringValue: proto.String("Rufus"),
},
},
// No height for Rufus.
},
},
},
MoreResults: proto.Bool(false),
}
return nil
}
type StructThatImplementsPLS struct{}
func (StructThatImplementsPLS) Load(p []Property) error { return nil }
func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil }
var _ PropertyLoadSaver = StructThatImplementsPLS{}
type StructPtrThatImplementsPLS struct{}
func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil }
func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil }
var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{}
type PropertyMap map[string]Property
func (m PropertyMap) Load(props []Property) error {
for _, p := range props {
if p.Multiple {
return errors.New("PropertyMap does not support multiple properties")
}
m[p.Name] = p
}
return nil
}
func (m PropertyMap) Save() ([]Property, error) {
props := make([]Property, 0, len(m))
for _, p := range m {
if p.Multiple {
return nil, errors.New("PropertyMap does not support multiple properties")
}
props = append(props, p)
}
return props, nil
}
var _ PropertyLoadSaver = PropertyMap{}
type Gopher struct {
Name string
Height int
}
// typeOfEmptyInterface is the type of interface{}, but we can't use
// reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an
// interface{}.
var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem()
func TestCheckMultiArg(t *testing.T) {
testCases := []struct {
v interface{}
mat multiArgType
elemType reflect.Type
}{
// Invalid cases.
{nil, multiArgTypeInvalid, nil},
{Gopher{}, multiArgTypeInvalid, nil},
{&Gopher{}, multiArgTypeInvalid, nil},
{PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case.
{PropertyMap{}, multiArgTypeInvalid, nil},
{[]*PropertyList(nil), multiArgTypeInvalid, nil},
{[]*PropertyMap(nil), multiArgTypeInvalid, nil},
{[]**Gopher(nil), multiArgTypeInvalid, nil},
{[]*interface{}(nil), multiArgTypeInvalid, nil},
// Valid cases.
{
[]PropertyList(nil),
multiArgTypePropertyLoadSaver,
reflect.TypeOf(PropertyList{}),
},
{
[]PropertyMap(nil),
multiArgTypePropertyLoadSaver,
reflect.TypeOf(PropertyMap{}),
},
{
[]StructThatImplementsPLS(nil),
multiArgTypePropertyLoadSaver,
reflect.TypeOf(StructThatImplementsPLS{}),
},
{
[]StructPtrThatImplementsPLS(nil),
multiArgTypePropertyLoadSaver,
reflect.TypeOf(StructPtrThatImplementsPLS{}),
},
{
[]Gopher(nil),
multiArgTypeStruct,
reflect.TypeOf(Gopher{}),
},
{
[]*Gopher(nil),
multiArgTypeStructPtr,
reflect.TypeOf(Gopher{}),
},
{
[]interface{}(nil),
multiArgTypeInterface,
typeOfEmptyInterface,
},
}
for _, tc := range testCases {
mat, elemType := checkMultiArg(reflect.ValueOf(tc.v))
if mat != tc.mat || elemType != tc.elemType {
t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v",
tc.v, mat, elemType, tc.mat, tc.elemType)
}
}
}
func TestSimpleQuery(t *testing.T) {
struct1 := Gopher{Name: "George", Height: 32}
struct2 := Gopher{Name: "Rufus"}
pList1 := PropertyList{
{
Name: "Name",
Value: "George",
},
{
Name: "Height",
Value: int64(32),
},
}
pList2 := PropertyList{
{
Name: "Name",
Value: "Rufus",
},
}
pMap1 := PropertyMap{
"Name": Property{
Name: "Name",
Value: "George",
},
"Height": Property{
Name: "Height",
Value: int64(32),
},
}
pMap2 := PropertyMap{
"Name": Property{
Name: "Name",
Value: "Rufus",
},
}
testCases := []struct {
dst interface{}
want interface{}
}{
// The destination must have type *[]P, *[]S or *[]*S, for some non-interface
// type P such that *P implements PropertyLoadSaver, or for some struct type S.
{new([]Gopher), &[]Gopher{struct1, struct2}},
{new([]*Gopher), &[]*Gopher{&struct1, &struct2}},
{new([]PropertyList), &[]PropertyList{pList1, pList2}},
{new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}},
// Any other destination type is invalid.
{0, nil},
{Gopher{}, nil},
{PropertyList{}, nil},
{PropertyMap{}, nil},
{[]int{}, nil},
{[]Gopher{}, nil},
{[]PropertyList{}, nil},
{new(int), nil},
{new(Gopher), nil},
{new(PropertyList), nil}, // This is a special case.
{new(PropertyMap), nil},
{new([]int), nil},
{new([]map[int]int), nil},
{new([]map[string]Property), nil},
{new([]map[string]interface{}), nil},
{new([]*int), nil},
{new([]*map[int]int), nil},
{new([]*map[string]Property), nil},
{new([]*map[string]interface{}), nil},
{new([]**Gopher), nil},
{new([]*PropertyList), nil},
{new([]*PropertyMap), nil},
}
for _, tc := range testCases {
nCall := 0
c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error {
nCall++
return fakeRunQuery(in, out)
})
c = internal.WithAppIDOverride(c, "dev~fake-app")
var (
expectedErr error
expectedNCall int
)
if tc.want == nil {
expectedErr = ErrInvalidEntityType
} else {
expectedNCall = 1
}
keys, err := NewQuery("Gopher").GetAll(c, tc.dst)
if err != expectedErr {
t.Errorf("dst type %T: got error [%v], want [%v]", tc.dst, err, expectedErr)
continue
}
if nCall != expectedNCall {
t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall)
continue
}
if err != nil {
continue
}
key1 := NewKey(c, "Gopher", "", 6, nil)
expectedKeys := []*Key{
key1,
NewKey(c, "Gopher", "", 8, key1),
}
if l1, l2 := len(keys), len(expectedKeys); l1 != l2 {
t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2)
continue
}
for i, key := range keys {
if key.AppID() != "s~test-app" {
t.Errorf(`dst type %T: Key #%d's AppID = %q, want "s~test-app"`, tc.dst, i, key.AppID())
continue
}
if !keysEqual(key, expectedKeys[i]) {
t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i])
continue
}
}
if !reflect.DeepEqual(tc.dst, tc.want) {
t.Errorf("dst type %T: Entities got %+v, want %+v", tc.dst, tc.dst, tc.want)
continue
}
}
}
// keysEqual is like (*Key).Equal, but ignores the App ID.
func keysEqual(a, b *Key) bool {
for a != nil && b != nil {
if a.Kind() != b.Kind() || a.StringID() != b.StringID() || a.IntID() != b.IntID() {
return false
}
a, b = a.Parent(), b.Parent()
}
return a == b
}
func TestQueriesAreImmutable(t *testing.T) {
// Test that deriving q2 from q1 does not modify q1.
q0 := NewQuery("foo")
q1 := NewQuery("foo")
q2 := q1.Offset(2)
if !reflect.DeepEqual(q0, q1) {
t.Errorf("q0 and q1 were not equal")
}
if reflect.DeepEqual(q1, q2) {
t.Errorf("q1 and q2 were equal")
}
// Test that deriving from q4 twice does not conflict, even though
// q4 has a long list of order clauses. This tests that the arrays
// backed by a query's slice of orders are not shared.
f := func() *Query {
q := NewQuery("bar")
// 47 is an ugly number that is unlikely to be near a re-allocation
// point in repeated append calls. For example, it's not near a power
// of 2 or a multiple of 10.
for i := 0; i < 47; i++ {
q = q.Order(fmt.Sprintf("x%d", i))
}
return q
}
q3 := f().Order("y")
q4 := f()
q5 := q4.Order("y")
q6 := q4.Order("z")
if !reflect.DeepEqual(q3, q5) {
t.Errorf("q3 and q5 were not equal")
}
if reflect.DeepEqual(q5, q6) {
t.Errorf("q5 and q6 were equal")
}
}
func TestFilterParser(t *testing.T) {
testCases := []struct {
filterStr string
wantOK bool
wantFieldName string
wantOp operator
}{
// Supported ops.
{"x<", true, "x", lessThan},
{"x <", true, "x", lessThan},
{"x <", true, "x", lessThan},
{" x < ", true, "x", lessThan},
{"x <=", true, "x", lessEq},
{"x =", true, "x", equal},
{"x >=", true, "x", greaterEq},
{"x >", true, "x", greaterThan},
{"in >", true, "in", greaterThan},
{"in>", true, "in", greaterThan},
// Valid but (currently) unsupported ops.
{"x!=", false, "", 0},
{"x !=", false, "", 0},
{" x != ", false, "", 0},
{"x IN", false, "", 0},
{"x in", false, "", 0},
// Invalid ops.
{"x EQ", false, "", 0},
{"x lt", false, "", 0},
{"x <>", false, "", 0},
{"x >>", false, "", 0},
{"x ==", false, "", 0},
{"x =<", false, "", 0},
{"x =>", false, "", 0},
{"x !", false, "", 0},
{"x ", false, "", 0},
{"x", false, "", 0},
}
for _, tc := range testCases {
q := NewQuery("foo").Filter(tc.filterStr, 42)
if ok := q.err == nil; ok != tc.wantOK {
t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK)
continue
}
if !tc.wantOK {
continue
}
if len(q.filter) != 1 {
t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1)
continue
}
got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42}
if got != want {
t.Errorf("%q: got %v, want %v", tc.filterStr, got, want)
continue
}
}
}
func TestQueryToProto(t *testing.T) {
// The context is required to make Keys for the test cases.
var got *pb.Query
NoErr := errors.New("No error")
c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error {
got = in
return NoErr // return a non-nil error so Run doesn't keep going.
})
c = internal.WithAppIDOverride(c, "dev~fake-app")
testCases := []struct {
desc string
query *Query
want *pb.Query
err string
}{
{
desc: "empty",
query: NewQuery(""),
want: &pb.Query{},
},
{
desc: "standard query",
query: NewQuery("kind").Order("-I").Filter("I >", 17).Filter("U =", "Dave").Limit(7).Offset(42).BatchSize(5),
want: &pb.Query{
Kind: proto.String("kind"),
Filter: []*pb.Query_Filter{
{
Op: pb.Query_Filter_GREATER_THAN.Enum(),
Property: []*pb.Property{
{
Name: proto.String("I"),
Value: &pb.PropertyValue{Int64Value: proto.Int64(17)},
Multiple: proto.Bool(false),
},
},
},
{
Op: pb.Query_Filter_EQUAL.Enum(),
Property: []*pb.Property{
{
Name: proto.String("U"),
Value: &pb.PropertyValue{StringValue: proto.String("Dave")},
Multiple: proto.Bool(false),
},
},
},
},
Order: []*pb.Query_Order{
{
Property: proto.String("I"),
Direction: pb.Query_Order_DESCENDING.Enum(),
},
},
Limit: proto.Int32(7),
Offset: proto.Int32(42),
Count: proto.Int32(5),
},
},
{
desc: "ancestor",
query: NewQuery("").Ancestor(NewKey(c, "kind", "Mummy", 0, nil)),
want: &pb.Query{
Ancestor: &pb.Reference{
App: proto.String("dev~fake-app"),
Path: &pb.Path{
Element: []*pb.Path_Element{{Type: proto.String("kind"), Name: proto.String("Mummy")}},
},
},
},
},
{
desc: "projection",
query: NewQuery("").Project("A", "B"),
want: &pb.Query{
PropertyName: []string{"A", "B"},
},
},
{
desc: "projection with distinct",
query: NewQuery("").Project("A", "B").Distinct(),
want: &pb.Query{
PropertyName: []string{"A", "B"},
GroupByPropertyName: []string{"A", "B"},
},
},
{
desc: "keys only",
query: NewQuery("").KeysOnly(),
want: &pb.Query{
KeysOnly: proto.Bool(true),
RequirePerfectPlan: proto.Bool(true),
},
},
{
desc: "empty filter",
query: NewQuery("kind").Filter("=", 17),
err: "empty query filter field nam",
},
{
desc: "bad filter type",
query: NewQuery("kind").Filter("M =", map[string]bool{}),
err: "bad query filter value type",
},
{
desc: "bad filter operator",
query: NewQuery("kind").Filter("I <<=", 17),
err: `invalid operator "<<=" in filter "I <<="`,
},
{
desc: "empty order",
query: NewQuery("kind").Order(""),
err: "empty order",
},
{
desc: "bad order direction",
query: NewQuery("kind").Order("+I"),
err: `invalid order: "+I`,
},
}
for _, tt := range testCases {
got = nil
if _, err := tt.query.Run(c).Next(nil); err != NoErr {
if tt.err == "" || !strings.Contains(err.Error(), tt.err) {
t.Errorf("%s: error %v, want %q", tt.desc, err, tt.err)
}
continue
}
if tt.err != "" {
t.Errorf("%s: no error, want %q", tt.desc, tt.err)
continue
}
// Fields that are common to all protos.
tt.want.App = proto.String("dev~fake-app")
tt.want.Compile = proto.Bool(true)
if !proto.Equal(got, tt.want) {
t.Errorf("%s:\ngot %v\nwant %v", tt.desc, got, tt.want)
}
}
}

333
vendor/google.golang.org/appengine/datastore/save.go generated vendored Normal file

@ -0,0 +1,333 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"errors"
"fmt"
"math"
"reflect"
"time"
"github.com/golang/protobuf/proto"
"google.golang.org/appengine"
pb "google.golang.org/appengine/internal/datastore"
)
func toUnixMicro(t time.Time) int64 {
// We cannot use t.UnixNano() / 1e3 because we want to handle times more than
// 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot
// be represented in the numerator of a single int64 divide.
return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
}
func fromUnixMicro(t int64) time.Time {
return time.Unix(t/1e6, (t%1e6)*1e3).UTC()
}
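// An illustrative round-trip sketch (not from the upstream file): times far
// outside the ~292-year span covered by int64 nanoseconds still survive,
// because only whole microseconds need to fit in an int64.
//
//	t := time.Date(2500, 1, 1, 0, 0, 0, 0, time.UTC) // t.UnixNano() would overflow int64
//	us := toUnixMicro(t)                             // about 1.67e16, comfortably in range
//	back := fromUnixMicro(us)                        // back.Equal(t) holds, since t has no sub-microsecond part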
var (
minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)
maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)
)
// valueToProto converts a named value to a newly allocated Property.
// The returned error string is empty on success.
func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) {
var (
pv pb.PropertyValue
unsupported bool
)
switch v.Kind() {
case reflect.Invalid:
// No-op.
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
pv.Int64Value = proto.Int64(v.Int())
case reflect.Bool:
pv.BooleanValue = proto.Bool(v.Bool())
case reflect.String:
pv.StringValue = proto.String(v.String())
case reflect.Float32, reflect.Float64:
pv.DoubleValue = proto.Float64(v.Float())
case reflect.Ptr:
if k, ok := v.Interface().(*Key); ok {
if k != nil {
pv.Referencevalue = keyToReferenceValue(defaultAppID, k)
}
} else {
unsupported = true
}
case reflect.Struct:
switch t := v.Interface().(type) {
case time.Time:
if t.Before(minTime) || t.After(maxTime) {
return nil, "time value out of range"
}
pv.Int64Value = proto.Int64(toUnixMicro(t))
case appengine.GeoPoint:
if !t.Valid() {
return nil, "invalid GeoPoint value"
}
// NOTE: Strangely, latitude maps to X, longitude to Y.
pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng}
default:
unsupported = true
}
case reflect.Slice:
if b, ok := v.Interface().([]byte); ok {
pv.StringValue = proto.String(string(b))
} else {
// nvToProto should already catch slice values.
// If we get here, we have a slice of slice values.
unsupported = true
}
default:
unsupported = true
}
if unsupported {
return nil, "unsupported datastore value type: " + v.Type().String()
}
p = &pb.Property{
Name: proto.String(name),
Value: &pv,
Multiple: proto.Bool(multiple),
}
if v.IsValid() {
switch v.Interface().(type) {
case []byte:
p.Meaning = pb.Property_BLOB.Enum()
case ByteString:
p.Meaning = pb.Property_BYTESTRING.Enum()
case appengine.BlobKey:
p.Meaning = pb.Property_BLOBKEY.Enum()
case time.Time:
p.Meaning = pb.Property_GD_WHEN.Enum()
case appengine.GeoPoint:
p.Meaning = pb.Property_GEORSS_POINT.Enum()
}
}
return p, ""
}
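// saveOpts holds the per-field options that control how a struct field is
// saved.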
type saveOpts struct {
noIndex bool
multiple bool
omitEmpty bool
}
// saveEntity converts a PropertyLoadSaver or struct pointer into an EntityProto.
func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) {
var err error
var props []Property
if e, ok := src.(PropertyLoadSaver); ok {
props, err = e.Save()
} else {
props, err = SaveStruct(src)
}
if err != nil {
return nil, err
}
return propertiesToProto(defaultAppID, key, props)
}
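// saveStructProperty converts a single struct field (or slice element) v into
// a Property and appends it to *props. Nested struct values are flattened by
// recursing with name+"." as the prefix for the inner fields.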
func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect.Value) error {
if opts.omitEmpty && isEmptyValue(v) {
return nil
}
p := Property{
Name: name,
NoIndex: opts.noIndex,
Multiple: opts.multiple,
}
switch x := v.Interface().(type) {
case *Key:
p.Value = x
case time.Time:
p.Value = x
case appengine.BlobKey:
p.Value = x
case appengine.GeoPoint:
p.Value = x
case ByteString:
p.Value = x
default:
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
p.Value = v.Int()
case reflect.Bool:
p.Value = v.Bool()
case reflect.String:
p.Value = v.String()
case reflect.Float32, reflect.Float64:
p.Value = v.Float()
case reflect.Slice:
if v.Type().Elem().Kind() == reflect.Uint8 {
p.NoIndex = true
p.Value = v.Bytes()
}
case reflect.Struct:
if !v.CanAddr() {
return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
}
sub, err := newStructPLS(v.Addr().Interface())
if err != nil {
return fmt.Errorf("datastore: unsupported struct field: %v", err)
}
return sub.save(props, name+".", opts)
}
}
if p.Value == nil {
return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
}
*props = append(*props, p)
return nil
}
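// Save implements PropertyLoadSaver for struct pointers by flattening the
// struct's fields into a slice of Properties.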
func (s structPLS) Save() ([]Property, error) {
var props []Property
if err := s.save(&props, "", saveOpts{}); err != nil {
return nil, err
}
return props, nil
}
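// save appends one Property per settable field to *props. Slice fields other
// than []byte are expanded into one Property per element, with Multiple set.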
func (s structPLS) save(props *[]Property, prefix string, opts saveOpts) error {
for name, f := range s.codec.fields {
name = prefix + name
v := s.v.FieldByIndex(f.path)
if !v.IsValid() || !v.CanSet() {
continue
}
var opts1 saveOpts
opts1.noIndex = opts.noIndex || f.noIndex
opts1.multiple = opts.multiple
opts1.omitEmpty = f.omitEmpty // don't propagate
// For slice fields that aren't []byte, save each element.
if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
opts1.multiple = true
for j := 0; j < v.Len(); j++ {
if err := saveStructProperty(props, name, opts1, v.Index(j)); err != nil {
return err
}
}
continue
}
// Otherwise, save the field itself.
if err := saveStructProperty(props, name, opts1, v); err != nil {
return err
}
}
return nil
}
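// propertiesToProto converts a key and a slice of Properties into an
// EntityProto, enforcing restrictions such as consistent use of the Multiple
// flag and the limit on indexed properties.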
func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) {
e := &pb.EntityProto{
Key: keyToProto(defaultAppID, key),
}
if key.parent == nil {
e.EntityGroup = &pb.Path{}
} else {
e.EntityGroup = keyToProto(defaultAppID, key.root()).Path
}
prevMultiple := make(map[string]bool)
for _, p := range props {
if pm, ok := prevMultiple[p.Name]; ok {
if !pm || !p.Multiple {
return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name)
}
} else {
prevMultiple[p.Name] = p.Multiple
}
x := &pb.Property{
Name: proto.String(p.Name),
Value: new(pb.PropertyValue),
Multiple: proto.Bool(p.Multiple),
}
switch v := p.Value.(type) {
case int64:
x.Value.Int64Value = proto.Int64(v)
case bool:
x.Value.BooleanValue = proto.Bool(v)
case string:
x.Value.StringValue = proto.String(v)
if p.NoIndex {
x.Meaning = pb.Property_TEXT.Enum()
}
case float64:
x.Value.DoubleValue = proto.Float64(v)
case *Key:
if v != nil {
x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v)
}
case time.Time:
if v.Before(minTime) || v.After(maxTime) {
return nil, fmt.Errorf("datastore: time value out of range")
}
x.Value.Int64Value = proto.Int64(toUnixMicro(v))
x.Meaning = pb.Property_GD_WHEN.Enum()
case appengine.BlobKey:
x.Value.StringValue = proto.String(string(v))
x.Meaning = pb.Property_BLOBKEY.Enum()
case appengine.GeoPoint:
if !v.Valid() {
return nil, fmt.Errorf("datastore: invalid GeoPoint value")
}
// NOTE: Strangely, latitude maps to X, longitude to Y.
x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng}
x.Meaning = pb.Property_GEORSS_POINT.Enum()
case []byte:
x.Value.StringValue = proto.String(string(v))
x.Meaning = pb.Property_BLOB.Enum()
if !p.NoIndex {
return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name)
}
case ByteString:
x.Value.StringValue = proto.String(string(v))
x.Meaning = pb.Property_BYTESTRING.Enum()
default:
if p.Value != nil {
return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name)
}
}
if p.NoIndex {
e.RawProperty = append(e.RawProperty, x)
} else {
e.Property = append(e.Property, x)
if len(e.Property) > maxIndexedProperties {
return nil, errors.New("datastore: too many indexed properties")
}
}
}
return e, nil
}
// isEmptyValue is taken from the encoding/json package in the standard library.
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
// TODO(performance): Only reflect.String is needed; the other property types are not supported (copied from the json package)
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
// TODO(performance): Uint* are unsupported property types and should be removed (copied from the json package)
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
case reflect.Struct:
switch x := v.Interface().(type) {
case time.Time:
return x.IsZero()
}
}
return false
}


@ -0,0 +1,65 @@
// Copyright 2012 Google Inc. All Rights Reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"testing"
"time"
)
func TestUnixMicro(t *testing.T) {
// Test that all these time.Time values survive a round trip to unix micros.
testCases := []time.Time{
{},
time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC),
time.Unix(-1e6, -1000),
time.Unix(-1e6, 0),
time.Unix(-1e6, +1000),
time.Unix(-60, -1000),
time.Unix(-60, 0),
time.Unix(-60, +1000),
time.Unix(-1, -1000),
time.Unix(-1, 0),
time.Unix(-1, +1000),
time.Unix(0, -3000),
time.Unix(0, -2000),
time.Unix(0, -1000),
time.Unix(0, 0),
time.Unix(0, +1000),
time.Unix(0, +2000),
time.Unix(+60, -1000),
time.Unix(+60, 0),
time.Unix(+60, +1000),
time.Unix(+1e6, -1000),
time.Unix(+1e6, 0),
time.Unix(+1e6, +1000),
time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC),
time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC),
time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC),
time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC),
time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC),
}
for _, tc := range testCases {
got := fromUnixMicro(toUnixMicro(tc))
if !got.Equal(tc) {
t.Errorf("got %q, want %q", got, tc)
}
}
// Test that a time.Time that isn't an integral number of microseconds
// is not perfectly reconstructed after a round trip.
t0 := time.Unix(0, 123)
t1 := fromUnixMicro(toUnixMicro(t0))
if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 {
t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond())
}
}


@ -0,0 +1,96 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by the Apache 2.0
// license that can be found in the LICENSE file.
package datastore
import (
"errors"
"golang.org/x/net/context"
"google.golang.org/appengine/internal"
pb "google.golang.org/appengine/internal/datastore"
)
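// The transaction setters registered below let the internal package attach
// the current transaction handle to each kind of datastore request proto.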
func init() {
internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) {
x.Transaction = t
})
internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) {
x.Transaction = t
})
internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) {
x.Transaction = t
})
internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) {
x.Transaction = t
})
}
// ErrConcurrentTransaction is returned when a transaction is rolled back due
// to a conflict with a concurrent transaction.
var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction")
// RunInTransaction runs f in a transaction. It calls f with a transaction
// context tc that f should use for all App Engine operations.
//
// If f returns nil, RunInTransaction attempts to commit the transaction,
// returning nil if it succeeds. If the commit fails due to a conflicting
// transaction, RunInTransaction retries f, each time with a new transaction
// context. It gives up and returns ErrConcurrentTransaction after three
// failed attempts. The number of attempts can be configured by specifying
// TransactionOptions.Attempts.
//
// If f returns non-nil, then any datastore changes will not be applied and
// RunInTransaction returns that same error. The function f is not retried.
//
// Note that when f returns, the transaction is not yet committed. Calling code
// must be careful not to assume that any of f's changes have been committed
// until RunInTransaction returns nil.
//
// Since f may be called multiple times, f should usually be idempotent.
// datastore.Get is not idempotent when unmarshaling slice fields.
//
// Nested transactions are not supported; c may not be a transaction context.
func RunInTransaction(c context.Context, f func(tc context.Context) error, opts *TransactionOptions) error {
xg := false
if opts != nil {
xg = opts.XG
}
readOnly := false
if opts != nil {
readOnly = opts.ReadOnly
}
attempts := 3
if opts != nil && opts.Attempts > 0 {
attempts = opts.Attempts
}
var t *pb.Transaction
var err error
for i := 0; i < attempts; i++ {
if t, err = internal.RunTransactionOnce(c, f, xg, readOnly, t); err != internal.ErrConcurrentTransaction {
return err
}
}
return ErrConcurrentTransaction
}
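// A minimal usage sketch (illustrative only; Counter, c and key stand in for
// an application-defined struct type, a request context and an existing *Key):
//
//	err := datastore.RunInTransaction(c, func(tc context.Context) error {
//		var n Counter
//		if err := datastore.Get(tc, key, &n); err != nil && err != datastore.ErrNoSuchEntity {
//			return err
//		}
//		n.Count++
//		_, err := datastore.Put(tc, key, &n)
//		return err
//	}, nil)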
// TransactionOptions are the options for running a transaction.
type TransactionOptions struct {
// XG is whether the transaction can cross multiple entity groups. In
// comparison, a single group transaction is one where all datastore keys
// used have the same root key. Note that cross group transactions do not
// have the same behavior as single group transactions. In particular, it
// is much more likely to see partially applied transactions in different
// entity groups, in global queries.
// It is valid to set XG to true even if the transaction is within a
// single entity group.
XG bool
// Attempts controls the number of retries to perform when commits fail
// due to a conflicting transaction. If omitted, it defaults to 3.
Attempts int
// ReadOnly controls whether the transaction is a read only transaction.
// Read only transactions are potentially more efficient.
ReadOnly bool
}