/*
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
*/
package query

import (
	"container/heap"
	"encoding/json"
	"fmt"
	"reflect"
	"time"

	"github.com/Sirupsen/logrus"

	"github.com/dgraph-io/dgraph/gql"
	"github.com/dgraph-io/dgraph/posting"
	"github.com/dgraph-io/dgraph/query/protocolbuffer"
	"github.com/dgraph-io/dgraph/task"
	"github.com/dgraph-io/dgraph/worker"
	"github.com/dgraph-io/dgraph/x"
	"github.com/golang/protobuf/proto"

	"github.com/google/flatbuffers/go"
)
/*
 * QUERY:
 * Let's take this query from GraphQL as example:
 * {
 *   me {
 *     id
 *     firstName
 *     lastName
 *     birthday {
 *       month
 *       day
 *     }
 *     friends {
 *       name
 *     }
 *   }
 * }
 *
 * REPRESENTATION:
 * This would be represented in SubGraph format internally, as such:
 * SubGraph [result uid = me]
 *    |
 *  Children
 *    |
 *    --> SubGraph [Attr = "xid"]
 *    --> SubGraph [Attr = "firstName"]
 *    --> SubGraph [Attr = "lastName"]
 *    --> SubGraph [Attr = "birthday"]
 *          |
 *        Children
 *          |
 *          --> SubGraph [Attr = "month"]
 *          --> SubGraph [Attr = "day"]
 *    --> SubGraph [Attr = "friends"]
 *          |
 *        Children
 *          |
 *          --> SubGraph [Attr = "name"]
 *
 * ALGORITHM:
 * This is a rough and simple algorithm of how to process this SubGraph query
 * and populate the results:
 *
 * For a given entity, a new SubGraph can be started off with NewGraph(id).
 * Given a SubGraph, is the Query field empty? [Step a]
 *   - If not, run (or send to the server serving the attribute) the query
 *     and populate the result.
 * Iterate over the children and copy the Result Uids to the child Query Uids.
 *   Set Attr. Then for each child, use a goroutine to run Step a.
 * Wait for the goroutines to finish.
 * Return errors, if any.
 */
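
// A rough sketch of how this package is expected to be driven end to end.
// The gql.Parse call is an assumption based on the gql package imported
// above; the rest are functions defined in this file:
//
//	gq, err := gql.Parse(queryText)   // parse the raw GraphQL query
//	sg, err := ToSubGraph(gq)         // convert to the SubGraph tree
//	l := Latency{Start: time.Now()}
//	rch := make(chan error)
//	go ProcessGraph(sg, rch)          // fetch and populate results
//	if err = <-rch; err == nil {
//		js, _ := sg.ToJson(&l)    // encode for the client
//	}
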
var glog = x.Log("query")
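
// Latency records the time spent in the various stages of answering a query:
// parsing, processing and JSON conversion.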
type Latency struct {
	Start      time.Time     `json:"-"`
	Parsing    time.Duration `json:"query_parsing"`
	Processing time.Duration `json:"processing"`
	Json       time.Duration `json:"json_conversion"`
}
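
// ToMap converts the latency breakdown into a map of duration strings, so it
// can be attached to the JSON response as server_latency.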
func (l *Latency) ToMap() map[string]string {
	m := make(map[string]string)
	j := time.Since(l.Start) - l.Processing - l.Parsing
	m["parsing"] = l.Parsing.String()
	m["processing"] = l.Processing.String()
	m["json"] = j.String()
	m["total"] = time.Since(l.Start).String()
	return m
}
// SubGraph is the way to represent data internally. It contains both the
// query and the response. Once generated, this can then be encoded to other
// client convenient formats, like GraphQL / JSON.
type SubGraph struct {
	Attr     string
	Children []*SubGraph

	query  []byte
	result []byte
}
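
// mergeInterfaces merges two results for the same uid. If both are maps,
// their keys are combined into one map; otherwise both values are returned
// together as a list.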
func mergeInterfaces(i1 interface{}, i2 interface{}) interface{} {
	switch i1.(type) {
	case map[string]interface{}:
		m1 := i1.(map[string]interface{})
		if m2, ok := i2.(map[string]interface{}); ok {
			for k1, v1 := range m1 {
				m2[k1] = v1
			}
			return m2
		}
		break
	}
	glog.Debugf("Got type: %v %v", reflect.TypeOf(i1), reflect.TypeOf(i2))
	glog.Debugf("Got values: %v %v", i1, i2)

	return []interface{}{i1, i2}
}
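
// postTraverse walks the SubGraph tree bottom-up, decoding the flatbuffer
// query and result at each node and merging them into a map keyed by uid,
// ready for JSON encoding.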
func postTraverse(g *SubGraph) (result map[uint64]interface{}, rerr error) {
	if len(g.query) == 0 {
		return result, nil
	}
	result = make(map[uint64]interface{})
	// Get results from all children first.
	cResult := make(map[uint64]interface{})

	for _, child := range g.Children {
		m, err := postTraverse(child)
		if err != nil {
			x.Err(glog, err).Error("Error while traversal")
			return result, err
		}
		// Merge results from all children, one by one.
		for k, v := range m {
			if val, present := cResult[k]; !present {
				cResult[k] = v
			} else {
				cResult[k] = mergeInterfaces(val, v)
			}
		}
	}

	// Now read the query and results at current node.
	uo := flatbuffers.GetUOffsetT(g.query)
	q := new(task.Query)
	q.Init(g.query, uo)

	ro := flatbuffers.GetUOffsetT(g.result)
	r := new(task.Result)
	r.Init(g.result, ro)

	if q.UidsLength() != r.UidmatrixLength() {
		glog.Fatalf("Result uidmatrixlength: %v. Query uidslength: %v",
			r.UidmatrixLength(), q.UidsLength())
	}
	if q.UidsLength() != r.ValuesLength() {
		glog.Fatalf("Result valuelength: %v. Query uidslength: %v",
			r.ValuesLength(), q.UidsLength())
	}
	var ul task.UidList
	for i := 0; i < r.UidmatrixLength(); i++ {
		if ok := r.Uidmatrix(&ul, i); !ok {
			return result, fmt.Errorf("While parsing UidList")
		}
		l := make([]interface{}, ul.UidsLength())
		for j := 0; j < ul.UidsLength(); j++ {
			uid := ul.Uids(j)
			m := make(map[string]interface{})
			m["_uid_"] = fmt.Sprintf("%#x", uid)
			if ival, present := cResult[uid]; !present {
				l[j] = m
			} else {
				l[j] = mergeInterfaces(m, ival)
			}
		}
		if len(l) > 0 {
			m := make(map[string]interface{})
			m[g.Attr] = l
			result[q.Uids(i)] = m
		}
	}
	var tv task.Value
	for i := 0; i < r.ValuesLength(); i++ {
		if ok := r.Values(&tv, i); !ok {
			return result, fmt.Errorf("While parsing value")
		}
		var ival interface{}
		if err := posting.ParseValue(&ival, tv.ValBytes()); err != nil {
			return result, err
		}

		if pval, present := result[q.Uids(i)]; present {
			glog.WithField("prev", pval).
				WithField("_uid_", q.Uids(i)).
				WithField("new", ival).
				Fatal("Previous value detected.")
		}
		m := make(map[string]interface{})
		m["_uid_"] = fmt.Sprintf("%#x", q.Uids(i))
		glog.WithFields(logrus.Fields{
			"_uid_": q.Uids(i),
			"val":   ival,
		}).Debug("Got value")
		m[g.Attr] = ival
		result[q.Uids(i)] = m
	}
	return result, nil
}
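
// ToJson converts the populated SubGraph into the JSON format expected by the
// client, attaching the latency breakdown under "server_latency".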
func (g *SubGraph) ToJson(l *Latency) (js []byte, rerr error) {
	r, err := postTraverse(g)
	if err != nil {
		x.Err(glog, err).Error("While doing traversal")
		return js, err
	}
	l.Json = time.Since(l.Start) - l.Parsing - l.Processing

	if len(r) == 1 {
		for _, ival := range r {
			m := ival.(map[string]interface{})
			m["server_latency"] = l.ToMap()
			return json.Marshal(m)
		}
	} else {
		glog.Fatal("We don't currently support more than 1 uid at root.")
	}

	return json.Marshal(r)
}
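// preTraverse recursively converts the flatbuffer-encoded results in a
// SubGraph into the protocolbuffer.SubGraph message used for the binary
// client response.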
func preTraverse(g *SubGraph) (sg *protocolbuffer.SubGraph, rerr error) {
	sg = &protocolbuffer.SubGraph{}
	sg.Attr = g.Attr

	ro := flatbuffers.GetUOffsetT(g.result)
	r := new(task.Result)
	r.Init(g.result, ro)

	var ul task.UidList
	result := &protocolbuffer.Result{}
	for i := 0; i < r.UidmatrixLength(); i++ {
		if ok := r.Uidmatrix(&ul, i); !ok {
			return sg, fmt.Errorf("While parsing UidList")
		}
		uidList := &protocolbuffer.UidList{}
		for j := 0; j < ul.UidsLength(); j++ {
			uid := ul.Uids(j)
			uidList.Uids = append(uidList.Uids, uid)
		}
		result.Uidmatrix = append(result.Uidmatrix, uidList)
	}

	var tv task.Value
	for i := 0; i < r.ValuesLength(); i++ {
		if ok := r.Values(&tv, i); !ok {
			return sg, fmt.Errorf("While parsing value")
		}
		var ival interface{}
		if err := posting.ParseValue(&ival, tv.ValBytes()); err != nil {
			return sg, err
		}
		if ival == nil {
			continue
		}
		result.Values = append(result.Values, []byte(ival.(string)))
	}
	sg.Result = result

	for _, child := range g.Children {
		childSg, err := preTraverse(child)
		if err != nil {
			x.Err(glog, err).Error("Error while traversal")
			return sg, err
		}
		sg.Children = append(sg.Children, childSg)
	}
	return sg, nil
}
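
// ToProtocolBuffer marshals the SubGraph into its protocol buffer
// representation, as an alternative to the JSON response.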
func (g *SubGraph) ToProtocolBuffer() (pb []byte, rerr error) {
	sg, err := preTraverse(g)
	if err != nil {
		x.Err(glog, err).Error("Error while traversal")
		return pb, err
	}

	pb, err = proto.Marshal(sg)
	if err != nil {
		x.Err(glog, err).Error("Error while marshalling to protocol buffer")
		return pb, err
	}
	return pb, nil
}
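// treeCopy mirrors the parsed GraphQL tree into a SubGraph tree, copying the
// attribute names so the query can be executed level by level.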
func treeCopy(gq *gql.GraphQuery, sg *SubGraph) {
	for _, gchild := range gq.Children {
		dst := new(SubGraph)
		dst.Attr = gchild.Attr
		sg.Children = append(sg.Children, dst)
		treeCopy(gchild, dst)
	}
}
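
// ToSubGraph converts the parsed GraphQL query into the internal SubGraph
// representation, rooted at the entity identified by uid or xid.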
func ToSubGraph(gq *gql.GraphQuery) (*SubGraph, error) {
	sg, err := newGraph(gq.UID, gq.XID)
	if err != nil {
		return nil, err
	}
	treeCopy(gq, sg)
	return sg, nil
}
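
// newGraph returns the root SubGraph, resolving the external id to a uid if
// necessary and seeding the result buffer with that single uid.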
func newGraph(euid uint64, exid string) (*SubGraph, error) {
	// This would set the Result field in SubGraph,
	// and populate the children for attributes.
	if len(exid) > 0 {
		xidToUid := make(map[string]uint64)
		xidToUid[exid] = 0
		if err := worker.GetOrAssignUidsOverNetwork(&xidToUid); err != nil {
			glog.WithError(err).Error("While getting uids over network")
			return nil, err
		}
		euid = xidToUid[exid]
		glog.WithField("xid", exid).WithField("uid", euid).Debug("GetOrAssign")
	}

	if euid == 0 {
		err := fmt.Errorf("Query internal id is zero")
		x.Err(glog, err).Error("Invalid query")
		return nil, err
	}

	// Encode uid into result flatbuffer.
	b := flatbuffers.NewBuilder(0)
	omatrix := x.UidlistOffset(b, []uint64{euid})

	// Also need to add nil value to keep this consistent.
	var voffset flatbuffers.UOffsetT
	{
		bvo := b.CreateByteVector(x.Nilbyte)
		task.ValueStart(b)
		task.ValueAddVal(b, bvo)
		voffset = task.ValueEnd(b)
	}

	task.ResultStartUidmatrixVector(b, 1)
	b.PrependUOffsetT(omatrix)
	mend := b.EndVector(1)

	task.ResultStartValuesVector(b, 1)
	b.PrependUOffsetT(voffset)
	vend := b.EndVector(1)

	task.ResultStart(b)
	task.ResultAddUidmatrix(b, mend)
	task.ResultAddValues(b, vend)
	rend := task.ResultEnd(b)
	b.Finish(rend)

	sg := new(SubGraph)
	sg.Attr = "_root_"
	sg.result = b.Bytes[b.Head():]
	// Also add query for consistency and to allow for ToJson() later.
	sg.query = createTaskQuery(sg.Attr, []uint64{euid})
	return sg, nil
}
// createTaskQuery generates the query buffer.
func createTaskQuery(attr string, sorted []uint64) []byte {
	b := flatbuffers.NewBuilder(0)
	ao := b.CreateString(attr)

	task.QueryStartUidsVector(b, len(sorted))
	for i := len(sorted) - 1; i >= 0; i-- {
		b.PrependUint64(sorted[i])
	}
	vend := b.EndVector(len(sorted))

	task.QueryStart(b)
	task.QueryAddAttr(b, ao)
	task.QueryAddUids(b, vend)
	qend := task.QueryEnd(b)
	b.Finish(qend)
	return b.Bytes[b.Head():]
}
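// ListChannel tracks a single UidList from the uid matrix along with the
// index of the next uid to be consumed during the merge.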
type ListChannel struct {
	TList *task.UidList
	Idx   int
}
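
// sortedUniqueUids merges the rows of the uid matrix in the result into one
// sorted list of unique uids, using a min-heap for the k-way merge.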
func sortedUniqueUids(r *task.Result) (sorted []uint64, rerr error) {
	// Let's serialize the matrix of uids in result to a
	// sorted unique list of uids.
	h := &x.Uint64Heap{}
	heap.Init(h)

	channels := make([]*ListChannel, r.UidmatrixLength())
	for i := 0; i < r.UidmatrixLength(); i++ {
		tlist := new(task.UidList)
		if ok := r.Uidmatrix(tlist, i); !ok {
			return sorted, fmt.Errorf("While parsing Uidmatrix")
		}
		if tlist.UidsLength() > 0 {
			e := x.Elem{
				Uid: tlist.Uids(0),
				Idx: i,
			}
			heap.Push(h, e)
		}
		channels[i] = &ListChannel{TList: tlist, Idx: 1}
	}

	// The resulting list of uids will be stored here.
	sorted = make([]uint64, 0, 100)
	var last uint64
	// Iterate over the heap.
	for h.Len() > 0 {
		me := (*h)[0] // Peek at the top element in heap.
		if me.Uid != last {
			sorted = append(sorted, me.Uid) // Add if unique.
			last = me.Uid
		}
		lc := channels[me.Idx]
		if lc.Idx >= lc.TList.UidsLength() {
			heap.Pop(h)
		} else {
			uid := lc.TList.Uids(lc.Idx)
			lc.Idx += 1
			me.Uid = uid
			(*h)[0] = me
			heap.Fix(h, 0) // Faster than Pop() followed by Push().
		}
	}
	return sorted, nil
}
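
// ProcessGraph recursively executes the query encoded in the SubGraph tree:
// it runs the task for this node, then spawns one goroutine per child with
// the merged uid list, and reports the first error (or nil) on rch.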
func ProcessGraph(sg *SubGraph, rch chan error) {
	var err error
	if len(sg.query) > 0 && sg.Attr != "_root_" {
		sg.result, err = worker.ProcessTaskOverNetwork(sg.query)
		if err != nil {
			x.Err(glog, err).Error("While processing task.")
			rch <- err
			return
		}
	}

	uo := flatbuffers.GetUOffsetT(sg.result)
	r := new(task.Result)
	r.Init(sg.result, uo)

	if r.ValuesLength() > 0 {
		var v task.Value
		if r.Values(&v, 0) {
			glog.WithField("attr", sg.Attr).WithField("val", string(v.ValBytes())).
				Info("Sample value")
		}
	}

	sorted, err := sortedUniqueUids(r)
	if err != nil {
		x.Err(glog, err).Error("While processing task.")
		rch <- err
		return
	}

	if len(sorted) == 0 {
		// Looks like we're done here.
		if len(sg.Children) > 0 {
			glog.Debugf("Have some children but no results. Life got cut short early."+
				" Current attribute: %q", sg.Attr)
		} else {
			glog.Debugf("No more things to process for Attr: %v", sg.Attr)
		}
		rch <- nil
		return
	}

	// Let's execute it in a tree fashion. Each SubGraph would break off
	// as many goroutines as it has children, which would then recursively
	// do the same thing.
	// Buffered channel to ensure no blockage.
	childchan := make(chan error, len(sg.Children))
	for i := 0; i < len(sg.Children); i++ {
		child := sg.Children[i]
		child.query = createTaskQuery(child.Attr, sorted)
		go ProcessGraph(child, childchan)
	}

	// Now get all the results back.
	for i := 0; i < len(sg.Children); i++ {
		err = <-childchan
		glog.WithFields(logrus.Fields{
			"num_children": len(sg.Children),
			"index":        i,
			"attr":         sg.Children[i].Attr,
		}).Debug("Reply from child")
		if err != nil {
			x.Err(glog, err).Error("While processing child task.")
			rch <- err
			return
		}
	}
	rch <- nil
}