diff --git a/conn/pool.go b/conn/pool.go
index b8aa13057840b02c2db6687e4a04be52194c1d7e..40e00e8bb458dd6a61068263a2cfcfabcfd7a93f 100644
--- a/conn/pool.go
+++ b/conn/pool.go
@@ -13,12 +13,12 @@ var glog = x.Log("conn")
 
 type Pool struct {
 	clients chan *rpc.Client
-	addr    string
+	Addr    string
 }
 
 func NewPool(addr string, maxCap int) *Pool {
 	p := new(Pool)
-	p.addr = addr
+	p.Addr = addr
 	p.clients = make(chan *rpc.Client, maxCap)
 	client, err := p.dialNew()
 	if err != nil {
@@ -36,7 +36,7 @@ func (p *Pool) dialNew() (*rpc.Client, error) {
 	var nconn net.Conn
 	var err error
 	for i := 0; i < 10; i++ {
-		nconn, err = d.Dial("tcp", p.addr)
+		nconn, err = d.Dial("tcp", p.Addr)
 		if err == nil {
 			break
 		}
@@ -44,7 +44,7 @@ func (p *Pool) dialNew() (*rpc.Client, error) {
 			break
 		}
 
-		glog.WithField("error", err).WithField("addr", p.addr).
+		glog.WithField("error", err).WithField("addr", p.Addr).
 			Info("Retrying connection...")
 		time.Sleep(10 * time.Second)
 	}
diff --git a/query/query.go b/query/query.go
index c5ea0bd21dc9e7757b5023425fd9d4332d8fd0f2..e564c87355b2423fe08acc5db0a26d1d4b5ac2bb 100644
--- a/query/query.go
+++ b/query/query.go
@@ -406,7 +406,7 @@ func ProcessGraph(sg *SubGraph, rch chan error) {
 	var err error
 	if len(sg.query) > 0 && sg.Attr != "_root_" {
 		// This task execution would go over the wire in later versions.
-		sg.result, err = worker.ProcessTask(sg.query)
+		sg.result, err = worker.ProcessTaskOverNetwork(sg.query)
 		if err != nil {
 			x.Err(glog, err).Error("While processing task.")
 			rch <- err
diff --git a/query/query_test.go b/query/query_test.go
index 397bcaa04f13ec318ff8c2a99c9bbbd6979ace71..7589cdf349da3b416a1be080d2f403fba689142c 100644
--- a/query/query_test.go
+++ b/query/query_test.go
@@ -103,7 +103,7 @@ func TestNewGraph(t *testing.T) {
 		t.Error(err)
 	}
 
-	worker.Init(ps, nil, nil)
+	worker.Init(ps, nil, 0, 1)
 
 	uo := flatbuffers.GetUOffsetT(sg.result)
 	r := new(task.Result)
@@ -134,7 +134,7 @@ func populateGraph(t *testing.T) (string, *store.Store) {
 	ps := new(store.Store)
 	ps.Init(dir)
 
-	worker.Init(ps, nil, nil)
+	worker.Init(ps, nil, 0, 1)
 
 	clog := commit.NewLogger(dir, "mutations", 50<<20)
 	clog.Init()
diff --git a/server/main.go b/server/main.go
index fce2df107fe2f728e272527002ebd2aa7dc25097..5b1edde27acfbc837e62f4e14426ee22d3daafd6 100644
--- a/server/main.go
+++ b/server/main.go
@@ -50,7 +50,6 @@ var instanceIdx = flag.Uint64("instanceIdx", 0,
 	"serves only entities whose Fingerprint % numInstance == instanceIdx.")
 var workers = flag.String("workers", "",
 	"Comma separated list of IP addresses of workers")
-var numInstances uint64
 
 func addCorsHeaders(w http.ResponseWriter) {
 	w.Header().Set("Access-Control-Allow-Origin", "*")
@@ -167,21 +166,23 @@ func main() {
 	defer clog.Close()
 
 	addrs := strings.Split(*workers, ",")
-	numInstances = len(addrs)
+	lenAddr := uint64(len(addrs))
+
 	posting.Init(clog)
+
 	if *instanceIdx != 0 {
-		worker.Init(ps, nil, addrs, *instanceIdx)
+		worker.Init(ps, nil, *instanceIdx, lenAddr)
 		uid.Init(nil)
 	} else {
 		uidStore := new(store.Store)
 		uidStore.Init(*uidDir)
 		defer uidStore.Close()
 		// Only server instance 0 will have uidStore
-		worker.Init(ps, uidStore, addrs, *instanceIdx)
+		worker.Init(ps, uidStore, *instanceIdx, lenAddr)
 		uid.Init(uidStore)
 	}
 
-	worker.Connect()
+	worker.Connect(addrs)
 
 	http.HandleFunc("/query", queryHandler)
 	glog.WithField("port", *port).Info("Listening for requests...")
diff --git a/server/main_test.go b/server/main_test.go
index 78c20da95f409349df5aed4163286ba06bb8b159..962256cdcbc3c1e314d34d0fd9f7272894b98f64 100644
--- a/server/main_test.go
+++ b/server/main_test.go
@@ -62,7 +62,7 @@ func prepare() (dir1, dir2 string, ps *store.Store, clog *commit.Logger, rerr er
 	clog.Init()
 
 	posting.Init(clog)
-	worker.Init(ps, nil, nil)
+	worker.Init(ps, nil, 0, 1)
 	uid.Init(ps)
 	loader.Init(ps, ps)
 
diff --git a/worker/worker.go b/worker/worker.go
index 324f0eddc98524d7fb566eaf57e2666ce031fcb6..bcd0893aed8e51af90085d5bfb3b1781eb0de7b4 100644
--- a/worker/worker.go
+++ b/worker/worker.go
@@ -20,19 +20,18 @@ var workerPort = flag.String("workerport", ":12345",
 	"Port used by worker for internal communication.")
 
 var glog = x.Log("worker")
-var dataStore, xiduidStore *store.Store
+var dataStore, uidStore *store.Store
 var pools []*conn.Pool
-var addrs []string
-var instanceIdx uint64
+var numInstances, instanceIdx uint64
 
-func Init(ps, xuStore *store.Store, workerList []string, idx uint64) {
+func Init(ps, uStore *store.Store, idx, numInst uint64) {
 	dataStore = ps
-	xiduidStore = xuStore
-	addrs = workerList
+	uidStore = uStore
 	instanceIdx = idx
+	numInstances = numInst
 }
 
-func Connect() {
+func Connect(workerList []string) {
 	w := new(Worker)
 	if err := rpc.Register(w); err != nil {
 		glog.Fatal(err)
@@ -40,8 +39,13 @@ func Connect() {
 	if err := runServer(*workerPort); err != nil {
 		glog.Fatal(err)
 	}
+	if uint64(len(workerList)) != numInstances {
+		glog.WithField("len(list)", len(workerList)).
+			WithField("numInstances", numInstances).
+			Fatal("Wrong number of instances in workerList")
+	}
 
-	for _, addr := range addrs {
+	for _, addr := range workerList {
 		if len(addr) == 0 {
 			continue
 		}
@@ -60,16 +64,49 @@ func Connect() {
 	glog.Info("Server started. Clients connected.")
 }
 
+func ProcessTaskOverNetwork(qu []byte) (result []byte, rerr error) {
+	uo := flatbuffers.GetUOffsetT(qu)
+	q := new(task.Query)
+	q.Init(qu, uo)
+
+	attr := string(q.Attr())
+	idx := farm.Fingerprint64([]byte(attr)) % numInstances
+
+	var runHere bool
+	if attr == "_xid_" || attr == "_uid_" {
+		idx = 0
+		runHere = (instanceIdx == 0)
+	} else {
+		runHere = (instanceIdx == idx)
+	}
+
+	if runHere {
+		return ProcessTask(qu)
+	}
+
+	pool := pools[idx]
+	addr := pool.Addr
+	query := new(conn.Query)
+	query.Data = qu
+	reply := new(conn.Reply)
+	if err := pool.Call("Worker.ServeTask", query, reply); err != nil {
+		glog.WithField("call", "Worker.ServeTask").Fatal(err)
+	}
+	glog.WithField("reply", string(reply.Data)).WithField("addr", addr).
+		Info("Got reply from server")
+	return reply.Data, nil
+}
+
 func ProcessTask(query []byte) (result []byte, rerr error) {
 	uo := flatbuffers.GetUOffsetT(query)
 	q := new(task.Query)
 	q.Init(query, uo)
+	attr := string(q.Attr())
 
 	b := flatbuffers.NewBuilder(0)
 	voffsets := make([]flatbuffers.UOffsetT, q.UidsLength())
 	uoffsets := make([]flatbuffers.UOffsetT, q.UidsLength())
 
-	attr := string(q.Attr())
 	for i := 0; i < q.UidsLength(); i++ {
 		uid := q.Uids(i)
 		key := posting.Key(uid, attr)
@@ -159,7 +196,8 @@ func (w *Worker) Mutate(query *conn.Query, reply *conn.Reply) (rerr error) {
 		plist := posting.GetOrCreate(key, dataStore)
 		if err := plist.AddMutation(edge, posting.Set); err != nil {
 			left.Set = append(left.Set, edge)
-			glog.WithError(err).WithField("edge", edge).Error("While adding mutation.")
+			glog.WithError(err).WithField("edge", edge).
+				Error("While adding mutation.")
 			continue
 		}
 	}
@@ -167,6 +205,22 @@ func (w *Worker) Mutate(query *conn.Query, reply *conn.Reply) (rerr error) {
 	return
 }
 
+func (w *Worker) ServeTask(query *conn.Query, reply *conn.Reply) (rerr error) {
+	uo := flatbuffers.GetUOffsetT(query.Data)
+	q := new(task.Query)
+	q.Init(query.Data, uo)
+	attr := string(q.Attr())
+
+	idx := farm.Fingerprint64([]byte(attr)) % numInstances
+	if (attr == "_xid_" || attr == "_uid_") && instanceIdx == 0 || idx == instanceIdx {
+		reply.Data, rerr = ProcessTask(query.Data)
+	} else {
+		glog.WithField("attribute", attr).WithField("instanceIdx", instanceIdx).
+			Fatal("Request sent to wrong server")
+	}
+	return rerr
+}
+
 func serveRequests(irwc io.ReadWriteCloser) {
 	for {
 		sc := &conn.ServerCodec{
diff --git a/worker/worker_test.go b/worker/worker_test.go
index 35ed56842824af45d10239d741e4f8e9335e495d..809eb05c876431c5533e50a30f0c200d45cbadfb 100644
--- a/worker/worker_test.go
+++ b/worker/worker_test.go
@@ -58,7 +58,7 @@ func TestProcessTask(t *testing.T) {
 	defer clog.Close()
 
 	posting.Init(clog)
-	Init(ps, nil, nil)
+	Init(ps, nil, 0, 1)
 
 	edge := x.DirectedEdge{
 		ValueId:   23,