diff --git a/include/tvm/attrs.h b/include/tvm/attrs.h
index 51d916ca488d41dd6eb156fe3031a383fc87def0..cc1abe6e57de2094a211fd82dc3e74a834a966b2 100644
--- a/include/tvm/attrs.h
+++ b/include/tvm/attrs.h
@@ -735,12 +735,12 @@ template<typename DerivedType>
 class AttrsNode : public BaseAttrsNode {
  public:
   void VisitAttrs(AttrVisitor* v) final {
-    detail::AttrNormalVisitor vis(v);
+    ::tvm::detail::AttrNormalVisitor vis(v);
     self()->__VisitAttrs__(vis);
   }
 
   void VisitNonDefaultAttrs(AttrVisitor* v) final {
-    detail::AttrNonDefaultVisitor vis(v);
+    ::tvm::detail::AttrNonDefaultVisitor vis(v);
     self()->__VisitAttrs__(vis);
   }
 
@@ -761,7 +761,7 @@ class AttrsNode : public BaseAttrsNode {
         }
         return false;
       };
-      auto vis = detail::CreateInitVisitor(DerivedType::_type_key, ffind);
+      auto vis = ::tvm::detail::CreateInitVisitor(DerivedType::_type_key, ffind);
       self()->__VisitAttrs__(vis);
       hit_count = vis.hit_count_;
     } else {
@@ -779,14 +779,14 @@ class AttrsNode : public BaseAttrsNode {
         }
         return false;
       };
-      auto vis = detail::CreateInitVisitor(DerivedType::_type_key, ffind);
+      auto vis = ::tvm::detail::CreateInitVisitor(DerivedType::_type_key, ffind);
       self()->__VisitAttrs__(vis);
       hit_count = vis.hit_count_;
     }
     // error handling, slow path
     if (hit_count * 2 != args.size() && !allow_unknown) {
       for (int i = 0; i < args.size(); i += 2) {
-        detail::AttrExistVisitor visitor;
+        ::tvm::detail::AttrExistVisitor visitor;
         visitor.key_ = args[i].operator std::string();
         self()->__VisitAttrs__(visitor);
         if (!visitor.exist_) {
@@ -803,7 +803,7 @@ class AttrsNode : public BaseAttrsNode {
   }
 
   Array<AttrFieldInfo> ListFieldInfo() const final {
-    detail::AttrDocVisitor visitor;
+    ::tvm::detail::AttrDocVisitor visitor;
     self()->__VisitAttrs__(visitor);
     return visitor.fields_;
   }
@@ -813,13 +813,13 @@ class AttrsNode : public BaseAttrsNode {
     if (pself == other) return true;
     if (other == nullptr) return false;
     if (pself->type_index() != other->type_index()) return false;
-    detail::AttrsEqualVisitor visitor(pself, other, equal);
+    ::tvm::detail::AttrsEqualVisitor visitor(pself, other, equal);
     self()->__VisitAttrs__(visitor);
     return visitor.result_;
   }
 
   size_t ContentHash(AttrsHash hasher) const final {
-    detail::AttrsHashVisitor visitor(hasher);
+    ::tvm::detail::AttrsHashVisitor visitor(hasher);
     visitor.result_ = std::hash<std::string>()(this->type_key());
     self()->__VisitAttrs__(visitor);
     return visitor.result_;
diff --git a/include/tvm/build_module.h b/include/tvm/build_module.h
index 7aafad4216e159a62a29227964eed23c98c92381..ddd54f604a68effff83c1d82b46f3b37c186056a 100644
--- a/include/tvm/build_module.h
+++ b/include/tvm/build_module.h
@@ -417,7 +417,7 @@ inline TVMRetValue GenericFunc::operator()(Args&& ...args) const {
   const int kArraySize = kNumArgs > 0 ? kNumArgs : 1;
   TVMValue values[kArraySize];
   int type_codes[kArraySize];
-  detail::for_each(TVMArgsSetter(values, type_codes),
+  runtime::detail::for_each(TVMArgsSetter(values, type_codes),
     std::forward<Args>(args)...);
   TVMRetValue rv;
   CallPacked(TVMArgs(values, type_codes, kNumArgs), &rv);
diff --git a/python/tvm/relay/interpreter.py b/python/tvm/relay/interpreter.py
index 4dfe3e02989e4dc299f038d96b2ac0f7af80cb53..bd8ef0d1441502b32eda4bdc9325f5890d887c06 100644
--- a/python/tvm/relay/interpreter.py
+++ b/python/tvm/relay/interpreter.py
@@ -138,7 +138,8 @@ class Executor(object):
         """
         if params:
             scope_builder = ScopeBuilder()
-            for key, value in params:
+            for key in params:
+                value = params[key]
                 scope_builder.let(key, value)
             scope_builder.ret(expr)
             expr = scope_builder.get()
@@ -146,7 +147,17 @@ class Executor(object):
         if isinstance(expr, Function):
             assert not ir_pass.free_vars(expr)
 
-        return self._make_executor(expr)
+        executor = self._make_executor(expr)
+
+        # If we are evaluating a function or top-level definition,
+        # the user must call the returned function themselves.
+        #
+        # If we are evaluating an open term with parameters, we
+        # just return the result directly.
+        if isinstance(expr, (Function, GlobalVar)):
+            return executor
+        else:
+            return executor()
 
 
 class Interpreter(Executor):
@@ -168,10 +179,14 @@ class Interpreter(Executor):
                 self.mod._add(expr, func, True)
                 opt_expr = Call(expr, relay_args)
                 return _interpreter.evaluate(self.mod, opt_expr)
-            else:
+            elif isinstance(expr, Function):
                 call = Call(expr, relay_args)
                 opt_expr = self.optimize(call)
                 return _interpreter.evaluate(self.mod, opt_expr)
+            else:
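+                # A bare (non-function) expression: evaluate it directly,
+                # so no call arguments are expected.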
+                assert not args
+                opt_expr = self.optimize(expr)
+                return _interpreter.evaluate(self.mod, opt_expr)
 
         return _interp_wrapper
 
diff --git a/python/tvm/relay/op/__init__.py b/python/tvm/relay/op/__init__.py
index 7b61fd10f5b05e6e775027381bdbc87fd6cb960d..9b581486608bf33f998e447ddd4e3aa423297249 100644
--- a/python/tvm/relay/op/__init__.py
+++ b/python/tvm/relay/op/__init__.py
@@ -1,7 +1,7 @@
 #pylint: disable=wildcard-import, redefined-builtin
 """Relay core operators."""
 # operator defs
-from .op import get, register, Op
+from .op import get, register, register_schedule, register_compute, Op
 
 # Operators
 from .reduce import *
diff --git a/python/tvm/relay/op/_tensor.py b/python/tvm/relay/op/_tensor.py
index 6ccb394ef8db43b0ed87aee7f33a3ba2a49fc4eb..5841d278378a1f9f0fd09f08b1f727ac3edfbf15 100644
--- a/python/tvm/relay/op/_tensor.py
+++ b/python/tvm/relay/op/_tensor.py
@@ -1,49 +1,272 @@
 #pylint: disable=invalid-name, unused-argument
 """Backend compiler related feature registration"""
+from __future__ import absolute_import
 import tvm
 import topi
-from . import register
+import topi.cuda
+from . import register_schedule, register_compute
 
+def schedule_injective(outputs, target):
+    """Generic schedule for binary broadcast."""
+    with tvm.target.create(target):
+        return topi.generic.schedule_injective(outputs)
+
+schedule_broadcast = schedule_injective
+schedule_elemwise = schedule_injective
+
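+# Each operator below registers a compute function ("FTVMCompute": build the
+# TOPI computation from the call attributes and input tensors) and a schedule
+# function ("FTVMSchedule": schedule the outputs for the given target).
+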
+# log
+def log_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 1
+    return [topi.log(inputs[0])]
+
+register_compute("log", log_compute)
+register_schedule("log", schedule_broadcast)
+
+# exp
+def exp_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 1
+    return [topi.exp(inputs[0])]
+
+register_compute("exp", exp_compute)
+register_schedule("exp", schedule_broadcast)
+
+# sqrt
+def sqrt_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 1
+    return [topi.sqrt(inputs[0])]
+
+register_compute("sqrt", sqrt_compute)
+register_schedule("sqrt", schedule_broadcast)
+
+# sigmoid
+def sigmoid_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 1
+    return [topi.sigmoid(inputs[0])]
+
+register_compute("sigmoid", sigmoid_compute)
+register_schedule("sigmoid", schedule_broadcast)
+
+# floor
+def floor_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 1
+    return [topi.floor(inputs[0])]
+
+register_compute("floor", floor_compute)
+register_schedule("floor", schedule_broadcast)
+
+# ceil
+def ceil_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 1
+    return [topi.ceil(inputs[0])]
+
+register_compute("ceil", ceil_compute)
+register_schedule("ceil", schedule_broadcast)
+
+# trunc
+def trunc_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 1
+    return [topi.trunc(inputs[0])]
+
+register_compute("trunc", trunc_compute)
+register_schedule("trunc", schedule_broadcast)
+
+# round
+def round_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 1
+    return [topi.round(inputs[0])]
+
+register_compute("round", round_compute)
+register_schedule("round", schedule_broadcast)
+
+# abs
+def abs_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 1
+    return [topi.abs(inputs[0])]
+
+register_compute("abs", abs_compute)
+register_schedule("abs", schedule_broadcast)
+
+# tanh
+def tanh_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 1
+    return [topi.tanh(inputs[0])]
+
+register_compute("tanh", tanh_compute)
+register_schedule("tanh", schedule_broadcast)
+
+# negative
+def negative_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 1
+    return [topi.negative(inputs[0])]
+
+register_compute("negative", negative_compute)
+register_schedule("negative", schedule_broadcast)
+
+# add
 def add_compute(attrs, inputs, output_type, target):
     assert len(inputs) == 2
     return [topi.add(inputs[0], inputs[1])]
 
-def add_schedule(outputs, target):
-    assert len(outputs) == 1
-    return tvm.create_schedule(outputs[0].op)
-
-register("add", "FTVMCompute", add_compute)
-register("add", "FTVMSchedule", add_schedule)
+register_compute("add", add_compute)
+register_schedule("add", schedule_injective)
 
+# subtract
 def subtract_compute(attrs, inputs, output_type, target):
     assert len(inputs) == 2
     return [topi.subtract(inputs[0], inputs[1])]
 
-def subtract_schedule(outputs, target):
-    assert len(outputs) == 1
-    return tvm.create_schedule(outputs[0].op)
-
-register("subtract", "FTVMCompute", subtract_compute)
-register("subtract", "FTVMSchedule", subtract_schedule)
+register_compute("subtract", subtract_compute)
+register_schedule("subtract", schedule_broadcast)
 
+# multiply
 def multiply_compute(attrs, inputs, output_type, target):
     assert len(inputs) == 2
     return [topi.multiply(inputs[0], inputs[1])]
 
-def multiply_schedule(outputs, target):
-    assert len(outputs) == 1
-    return tvm.create_schedule(outputs[0].op)
+register_compute("multiply", multiply_compute)
+register_schedule("multiply", schedule_broadcast)
+
+# divide
+def divide_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 2
+    return [topi.divide(inputs[0], inputs[1])]
+
+register_compute("divide", divide_compute)
+register_schedule("divide", schedule_broadcast)
 
-register("multiply", "FTVMCompute", multiply_compute)
-register("multiply", "FTVMSchedule", multiply_schedule)
+# pow
+def pow_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 2
+    return [topi.power(inputs[0], inputs[1])]
 
+register_compute("pow", pow_compute)
+register_schedule("pow", schedule_injective)
+
+# mod
+def mod_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 2
+    return [topi.mod(inputs[0], inputs[1])]
+
+register_compute("mod", mod_compute)
+register_schedule("mod", schedule_broadcast)
+
+# equal
 def equal_compute(attrs, inputs, output_type, target):
     assert len(inputs) == 2
     return [topi.equal(inputs[0], inputs[1])]
 
-def equal_schedule(outputs, target):
-    assert len(outputs) == 1
-    return tvm.create_schedule(outputs[0].op)
+register_compute("equal", equal_compute)
+register_schedule("equal", schedule_broadcast)
+
+# not_equal
+def not_equal_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 2
+    return [topi.not_equal(inputs[0], inputs[1])]
+
+register_compute("not_equal", not_equal_compute)
+register_schedule("not_equal", schedule_broadcast)
+
+# less
+def less_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 2
+    return [topi.less(inputs[0], inputs[1])]
+
+register_compute("less", less_compute)
+register_schedule("less", schedule_broadcast)
+
+# less equal
+def less_equal_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 2
+    return [topi.less_equal(inputs[0], inputs[1])]
+
+register_compute("less_equal", less_equal_compute)
+register_schedule("less_equal", schedule_broadcast)
+
+# greater
+def greater_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 2
+    return [topi.greater(inputs[0], inputs[1])]
+
+register_compute("greater", greater_compute)
+register_schedule("greater", schedule_broadcast)
+
+# greater equal
+def greater_equal_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 2
+    return [topi.greater_equal(inputs[0], inputs[1])]
+
+register_compute("greater_equal", greater_equal_compute)
+register_schedule("greater_equal", schedule_broadcast)
+
+# maximum
+def maximum_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 2
+    return [topi.maximum(inputs[0], inputs[1])]
+
+register_compute("maximum_compute", maximum_compute)
+register_schedule("maximum_compute", schedule_injective)
+
+# minimum
+def minimum_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 2
+    return [topi.minimum(inputs[0], inputs[1])]
+
+register_compute("minimum", minimum_compute)
+register_schedule("minimum", schedule_injective)
+
+# right shift
+def right_shift_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 2
+    return [topi.right_shift(inputs[0], inputs[1])]
+
+register_compute("right_shift", right_shift_compute)
+register_schedule("right_shift", schedule_injective)
+
+# left shift
+def left_shift_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 2
+    return [topi.left_shift(inputs[0], inputs[1])]
+
+register_compute("left_shift", left_shift_compute)
+register_schedule("left_shift", schedule_injective)
+
+# zeros
+def zeros_compute(attrs, inputs, output_type, target):
+    assert not inputs
+    return [topi.full(output_type.shape, output_type.dtype, 0.0)]
+
+register_compute("zeros", zeros_compute)
+register_schedule("zeros", schedule_injective)
+
+# zeros_like
+def zeros_like_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 1
+    return [topi.full_like(inputs[0], 0.0)]
+
+register_compute("zeros_like", zeros_like_compute)
+register_schedule("zeros_like", schedule_injective)
+
+# ones
+def ones_compute(attrs, inputs, output_type, target):
+    assert not inputs
+    return [topi.full(output_type.shape, output_type.dtype, 1.0)]
+
+register_compute("ones", ones_compute)
+register_schedule("ones", schedule_injective)
+
+# ones_like
+def ones_like_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 1
+    return [topi.full_like(inputs[0], 1.0)]
+
+register_compute("ones_like", ones_like_compute)
+register_schedule("ones_like", schedule_injective)
+
+# clip
+def clip_compute(attrs, inputs, output_type, target):
+    assert len(inputs) == 1
+    return [topi.clip(inputs[0], attrs.a_min, attrs.a_max)]
+
 
-register("equal", "FTVMCompute", equal_compute)
-register("equal", "FTVMSchedule", equal_schedule)
+register_compute("clip", clip_compute)
+register_schedule("clip", schedule_injective)
diff --git a/python/tvm/relay/op/op.py b/python/tvm/relay/op/op.py
index 0c09f39a3c832ddf1c4901cd1c33f965e18ea6f9..91523f65f6b717ab56429599bf814849688eb3a5 100644
--- a/python/tvm/relay/op/op.py
+++ b/python/tvm/relay/op/op.py
@@ -74,6 +74,11 @@ def register(op_name, attr_key, value=None, level=10):
         return v
     return _register(value) if value else _register
 
+def register_schedule(op_name, schedule):
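+    """Register the schedule function (FTVMSchedule) for an op."""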
+    register(op_name, "FTVMSchedule", schedule)
+
+def register_compute(op_name, compute):
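+    """Register the compute function (FTVMCompute) for an op."""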
+    register(op_name, "FTVMCompute", compute)
 
 _init_api("relay.op", __name__)
 
diff --git a/python/tvm/relay/op/tensor.py b/python/tvm/relay/op/tensor.py
index 3c432b58092d1b30fa1096ed757593fed2fc6063..2505da8f1dfd6913ceb1e3a97f06287c1646b16f 100644
--- a/python/tvm/relay/op/tensor.py
+++ b/python/tvm/relay/op/tensor.py
@@ -213,9 +213,8 @@ def add(lhs, rhs):
     """
     return _make.add(lhs, rhs)
 
-
-def multiply(lhs, rhs):
-    """Multiplication with numpy-style broadcasting.
+def subtract(lhs, rhs):
+    """Subtraction with numpy-style broadcasting.
 
     Parameters
     ----------
@@ -229,11 +228,10 @@ def multiply(lhs, rhs):
     result : relay.Expr
         The computed result.
     """
-    return _make.multiply(lhs, rhs)
-
+    return _make.subtract(lhs, rhs)
 
-def divide(lhs, rhs):
-    """Division with numpy-style broadcasting.
+def multiply(lhs, rhs):
+    """Multiplication with numpy-style broadcasting.
 
     Parameters
     ----------
@@ -247,11 +245,11 @@ def divide(lhs, rhs):
     result : relay.Expr
         The computed result.
     """
-    return _make.divide(lhs, rhs)
+    return _make.multiply(lhs, rhs)
 
 
-def pow(lhs, rhs):
-    """Power with numpy-style broadcasting.
+def divide(lhs, rhs):
+    """Division with numpy-style broadcasting.
 
     Parameters
     ----------
@@ -265,11 +263,11 @@ def pow(lhs, rhs):
     result : relay.Expr
         The computed result.
     """
-    return _make.pow(lhs, rhs)
+    return _make.divide(lhs, rhs)
 
 
-def mod(lhs, rhs):
-    """Mod with numpy-style broadcasting.
+def pow(lhs, rhs):
+    """Power with numpy-style broadcasting.
 
     Parameters
     ----------
@@ -283,11 +281,11 @@ def mod(lhs, rhs):
     result : relay.Expr
         The computed result.
     """
-    return _make.mod(lhs, rhs)
+    return _make.pow(lhs, rhs)
 
 
-def subtract(lhs, rhs):
-    """Subtraction with numpy-style broadcasting.
+def mod(lhs, rhs):
+    """Mod with numpy-style broadcasting.
 
     Parameters
     ----------
@@ -301,7 +299,7 @@ def subtract(lhs, rhs):
     result : relay.Expr
         The computed result.
     """
-    return _make.subtract(lhs, rhs)
+    return _make.mod(lhs, rhs)
 
 
 def equal(lhs, rhs):
@@ -553,7 +551,6 @@ def ones_like(data):
     """
     return _make.ones_like(data)
 
-
 def clip(a, a_min, a_max):
     """Clip the elements in `a` between `a_min` and `a_max`.
     `a_min` and `a_max` are cast to `a`'s dtype.
diff --git a/src/relay/pass/lower_ops.cc b/src/relay/pass/lower_ops.cc
index f2c8ceba866d3f05d0166e5b00e63b81d904d357..55102fe5cf67d9daf4089805bbfbcc1e704ab363 100644
--- a/src/relay/pass/lower_ops.cc
+++ b/src/relay/pass/lower_ops.cc
@@ -8,6 +8,7 @@
  */
 #include <tvm/lowered_func.h>
 #include <tvm/operation.h>
+#include <tvm/build_module.h>
 #include <tvm/relay/expr_functor.h>
 #include <tvm/relay/logging.h>
 #include <tvm/relay/pass.h>
@@ -155,8 +156,8 @@ struct LiveFunctions : ExprVisitor {
 };
 
 using FCompute = TypedPackedFunc<Array<Tensor>(
-    const Attrs&, const Array<Tensor>&, Type, std::string)>;
-using FSchedule = TypedPackedFunc<Schedule(const Array<Tensor>&, std::string)>;
+    const Attrs&, const Array<Tensor>&, Type, tvm::Target)>;
+using FSchedule = TypedPackedFunc<Schedule(const Array<Tensor>&, tvm::Target)>;
 
 /*! \brief Return the set of operators in their TVM format. */
 Array<LoweredOp> LowerOps(const Module& mod, const Expr& e,
@@ -179,7 +180,7 @@ Array<LoweredOp> LowerOps(const Module& mod, const Expr& e,
     auto func = mod->Lookup(func_name);
     auto call = Downcast<Call>(func->body);
     auto op_node = call->op.as<OpNode>();
-    CHECK(op_node) << "violated invariant that primtiive calls contain a single op call";
+    CHECK(op_node) << "violated invariant that primitive calls contain a single op call";
     auto op = GetRef<Op>(op_node);
     RELAY_LOG(INFO) << "LowerOps: Lowering " << op->name;
 
@@ -197,10 +198,11 @@ Array<LoweredOp> LowerOps(const Module& mod, const Expr& e,
       i++;
     }
 
-    auto output_tt = op->op_type->ret_type;
+    auto output_tt = call->checked_type();
+    auto target_node = Target::create(target);
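+    // Look up and invoke this op's registered compute and schedule functions.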
     Array<Tensor> outputs =
-        compute_reg[op](call->attrs, inputs, output_tt, target);
-    auto schedule = schedule_reg[op](outputs, target);
+        compute_reg[op](call->attrs, inputs, output_tt, target_node);
+    auto schedule = schedule_reg[op](outputs, target_node);
     size_t hash = StructuralHash()(func);
     LoweredFunc lf =
         flower(op->name + std::to_string(hash), schedule, inputs, outputs);
diff --git a/tests/python/relay/test_op_level1.py b/tests/python/relay/test_op_level1.py
index a622dfc2cbd42d7d76e684765473ca3f54877aae..7ab13409cc43a08cda888687b849681183a65ea5 100644
--- a/tests/python/relay/test_op_level1.py
+++ b/tests/python/relay/test_op_level1.py
@@ -1,11 +1,23 @@
+import math
 import tvm
 import numpy as np
 from tvm import relay
+from tvm.relay.interpreter import create_executor
 
+def sigmoid(x):
+    one = np.ones_like(x)
+    return one / (one + np.exp(-x))
+
+def relu(x):
+    x_copy = np.copy(x)
+    np.maximum(x_copy, 0, x_copy)
+    return x_copy
 
 def test_unary_op():
-    def check_single_op(opfunc):
-        tp = relay.TensorType((10, 4), "float32")
+    def check_single_op(opfunc, ref):
+        shape = (10, 4)
+        dtype = 'float32'
+        tp = relay.TensorType(shape, dtype)
         x = relay.var("x", tp)
         y = opfunc(x)
         # test printer
@@ -13,20 +25,33 @@ def test_unary_op():
         # test type inference
         assert relay.ir_pass.infer_type(y).checked_type == tp
 
-    for opfunc in [tvm.relay.log,
-                   tvm.relay.exp,
-                   tvm.relay.sqrt,
-                   tvm.relay.sigmoid,
-                   tvm.relay.tanh,
-                   relay.nn.relu]:
-        check_single_op(opfunc)
+        if ref is not None:
+            data = np.random.rand(*shape).astype(dtype)
+            intrp = create_executor()
+            op_res = intrp.evaluate(y, { x: relay.const(data) })
+            ref_res = ref(data)
+            np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
+
+    for opfunc, ref in [(tvm.relay.log, np.log),
+                        (tvm.relay.exp, np.exp),
+                        (tvm.relay.sqrt, np.sqrt),
+                        (tvm.relay.sigmoid, sigmoid),
+                        (tvm.relay.tanh, np.tanh),
+                        (relay.nn.relu, None)]:  # use relu above once nn.relu is registered
+        check_single_op(opfunc, ref)
 
 
 def test_binary_op():
-    def check_binary_op(opfunc):
+    def inst(vars, sh):
+        return [vars.get(s, s) for s in sh]
+
+    def check_binary_op(opfunc, ref):
+        # TODO(@jroesch): this piece of code improperly uses type variables.
         n = tvm.var("n")
-        t1 = relay.TensorType((5, n, 5))
-        t2 = relay.TensorType((n, 1))
+        s1 = (5, n, 5)
+        s2 = (n, 1)
+        t1 = relay.TensorType(s1)
+        t2 = relay.TensorType(s2)
         x = relay.var("x", t1)
         y = relay.var("y", t2)
         z = opfunc(x, y)
@@ -34,12 +59,25 @@ def test_binary_op():
         assert ("%0 = {}(%x, %y)".format(z.op.name)) in z.astext()
         assert relay.ir_pass.infer_type(z).checked_type == t1
 
-    for opfunc in [relay.add,
-                   relay.subtract,
-                   relay.mod,
-                   relay.multiply,
-                   relay.divide]:
-        check_binary_op(opfunc)
+        if ref is not None:
+            t1 = relay.TensorType((5, 10, 5))
+            t2 = relay.TensorType((5, 10, 5))
+            x = relay.var("x", t1)
+            y = relay.var("y", t2)
+            z = opfunc(x, y)
+            x_data = np.random.rand(5, 10, 5).astype(t1.dtype)
+            y_data = np.random.rand(5, 10, 5).astype(t2.dtype)
+            intrp = create_executor()
+            op_res = intrp.evaluate(z, { x: relay.const(x_data), y: relay.const(y_data) })
+            ref_res = ref(x_data, y_data)
+            np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
+
+    for opfunc, ref in [(relay.add, np.add),
+                        (relay.subtract, np.subtract),
+                        (relay.mod, np.mod),
+                        (relay.multiply, np.multiply),
+                        (relay.divide, np.divide)]:
+        check_binary_op(opfunc, ref)
 
 
 def test_bias_add():
@@ -96,6 +134,15 @@ def test_concatenate_infer_type():
     zz = relay.ir_pass.infer_type(z)
     assert zz.checked_type == relay.TensorType((n, t + t, 100))
 
+    # x = relay.var("x", shape=(10, 5))
+    # y = relay.var("y", shape=(10, 5))
+    # z = relay.concatenate((x, y), axis=1)
+    # intrp = create_executor()
+    # x_data = np.random.rand(10, 5).astype('float32')
+    # y_data = np.random.rand(10, 5).astype('float32')
+    # op_res = intrp.evaluate(z, { x: relay.const(x_data), y: relay.const(y_data) })
+    # ref_res = np.concatenate((x_data, y_data), axis=1)
+    # np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
 
 def test_dropout():
     n, t, d = tvm.var("n"), tvm.var("t"), tvm.var("d")
diff --git a/tests/python/relay/test_op_level3.py b/tests/python/relay/test_op_level3.py
index 6f06c8698e3f2422f648699c7ec960d99f746c96..26eccf991d0eb9e540943c32d016eb374dc59656 100644
--- a/tests/python/relay/test_op_level3.py
+++ b/tests/python/relay/test_op_level3.py
@@ -3,29 +3,40 @@
 import tvm
 import numpy as np
 from tvm import relay
+from tvm.relay import create_executor
 from nose.tools import raises
 
 def test_zeros_ones():
-    for op in [relay.zeros, relay.ones]:
+    for op, ref in [(relay.zeros, np.zeros), (relay.ones, np.ones)]:
         y = op(shape=(124, 50), dtype="float64")
         yy = relay.ir_pass.infer_type(y)
         assert yy.checked_type == relay.TensorType((124, 50), "float64")
+        intrp = create_executor()
+        intrp_res = intrp.evaluate(y).asnumpy()
+        np.testing.assert_allclose(intrp_res, ref((124, 50), 'float64'))
 
 def test_unary_identity():
-    for op in [relay.zeros_like,
-               relay.ones_like,
-               relay.ceil,
-               relay.floor,
-               relay.trunc,
-               relay.round,
-               relay.abs,
-               relay.copy,
-               relay.negative]:
-        x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
+    for op, ref in [(relay.zeros_like, np.zeros_like),
+                    (relay.ones_like, np.ones_like),
+                    (relay.ceil, np.ceil),
+                    (relay.floor, np.floor),
+                    (relay.trunc, np.trunc),
+                    (relay.round, np.round),
+                    (relay.abs, np.abs),
+                    (relay.copy, None),  # np.copy
+                    (relay.negative, np.negative)]:
+        shape = (8, 9, 4)
+        x = relay.var("x", relay.TensorType(shape, "float32"))
         y = op(x)
         yy = relay.ir_pass.infer_type(y)
-        assert yy.checked_type == relay.TensorType((8, 9, 4), "float32")
+        assert yy.checked_type == relay.TensorType(shape, "float32")
 
+        if ref is not None:
+            data = np.random.rand(*shape).astype('float32')
+            intrp = create_executor()
+            op_res = intrp.evaluate(y, { x: relay.const(data) })
+            ref_res = ref(data)
+            np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
 
 def test_cast():
     x = relay.var("x", relay.TensorType((8, 9, 4), "float32"))
@@ -35,12 +46,20 @@ def test_cast():
     assert yy.checked_type == relay.TensorType((8, 9, 4), "int32")
 
 
-def test_clip_type():
+def test_clip():
     a = relay.var("a", relay.TensorType((10, 4), "float32"))
     y = relay.clip(a, 1., 4.)
     yy = relay.ir_pass.infer_type(y)
     assert yy.checked_type == relay.TensorType((10, 4), "float32")
 
+    data = np.random.rand(10, 4).astype('float32')
+    intrp = create_executor()
+    op_res = intrp.evaluate(y, { a: relay.const(data) })
+    ref_res = np.clip(data, 1., 4.)
+    np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
+
 
 def test_transpose_infer_type():
     n, t, d = tvm.var("n"), tvm.var("t"), 100
@@ -226,7 +245,7 @@ if __name__ == "__main__":
     test_cast()
     test_zeros_ones()
     test_unary_identity()
-    test_clip_type()
+    test_clip()
     test_transpose_infer_type()
     test_reshape_infer_type()
     test_reshape_like()
diff --git a/tests/python/relay/test_op_level4.py b/tests/python/relay/test_op_level4.py
index 2dc643cfd7e4a838c253f941405ceb58a404cabe..d20997010b4cdd7380c637f74a66ccceee417c07 100644
--- a/tests/python/relay/test_op_level4.py
+++ b/tests/python/relay/test_op_level4.py
@@ -1,10 +1,11 @@
 import tvm
 import numpy as np
 from tvm import relay
+from tvm.relay import create_executor
 
 
 def test_binary_op():
-    def check_binary_op(opfunc):
+    def check_binary_op(opfunc, ref):
         n = tvm.var("n")
         t1 = relay.TensorType((5, n, 5))
         t2 = relay.TensorType((n, 1))
@@ -15,17 +16,30 @@ def test_binary_op():
         assert ("%0 = {}(%x, %y)".format(z.op.name)) in z.astext()
         assert relay.ir_pass.infer_type(z).checked_type == t1
 
-    for opfunc in [relay.pow]:
-        check_binary_op(opfunc)
+        if ref is not None:
+            t1 = relay.TensorType((5, 10, 5))
+            t2 = relay.TensorType((5, 10, 5))
+            x = relay.var("x", t1)
+            y = relay.var("y", t2)
+            z = opfunc(x, y)
+            x_data = np.random.rand(5, 10, 5).astype(t1.dtype)
+            y_data = np.random.rand(5, 10, 5).astype(t2.dtype)
+            intrp = create_executor()
+            op_res = intrp.evaluate(z, { x: relay.const(x_data), y: relay.const(y_data) })
+            ref_res = ref(x_data, y_data)
+            np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
+
+    for opfunc, ref in [(relay.pow, np.power)]:
+        check_binary_op(opfunc, ref)
 
 
 def test_cmp_type():
-    for op in (relay.greater,
-               relay.greater_equal,
-               relay.less,
-               relay.less_equal,
-               relay.equal,
-               relay.not_equal):
+    for op, ref in ((relay.greater, np.greater),
+                    (relay.greater_equal, np.greater_equal),
+                    (relay.less, np.less),
+                    (relay.less_equal, np.less_equal),
+                    (relay.equal, np.equal),
+                    (relay.not_equal, np.not_equal)):
         x = relay.var("x", relay.TensorType((10, 4), "float32"))
         y = relay.var("y", relay.TensorType((5, 10, 1), "float32"))
         z = op(x, y)
@@ -33,18 +47,44 @@ def test_cmp_type():
         zz = relay.ir_pass.infer_type(z)
         assert zz.checked_type == relay.TensorType((5, 10, 4), "bool")
 
+        if ref is not None:
+            x_shape = (10, 4)
+            y_shape = (5, 10, 1)
+            t1 = relay.TensorType(x_shape)
+            t2 = relay.TensorType(y_shape)
+            x = relay.var("x", t1)
+            y = relay.var("y", t2)
+            z = op(x, y)
+            x_data = np.random.rand(*x_shape).astype(t1.dtype)
+            y_data = np.random.rand(*y_shape).astype(t2.dtype)
+            intrp = create_executor()
+            op_res = intrp.evaluate(z, { x: relay.const(x_data), y: relay.const(y_data) })
+            ref_res = ref(x_data, y_data)
+            np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
+
 
 def test_binary_int_broadcast():
-    for op in [relay.right_shift,
-               relay.left_shift,
-               relay.maximum,
-               relay.minimum]:
+    for op, ref in [(relay.right_shift, np.right_shift),
+                    (relay.left_shift, np.left_shift),
+                    (relay.maximum, np.maximum),
+                    (relay.minimum, np.minimum)]:
         x = relay.var("x", relay.TensorType((10, 4), "int32"))
         y = relay.var("y", relay.TensorType((5, 10, 1), "int32"))
         z = op(x, y)
         zz = relay.ir_pass.infer_type(z)
         assert zz.checked_type == relay.TensorType((5, 10, 4), "int32")
 
+        if ref is not None:
+            x_shape = (10, 4)
+            y_shape = (5, 10, 1)
+            t1 = relay.TensorType(x_shape, 'int32')
+            t2 = relay.TensorType(y_shape, 'int32')
+            # use non-zero integer data so the shift/maximum/minimum checks are meaningful
+            x_data = np.random.randint(1, 10, size=x_shape).astype(t1.dtype)
+            y_data = np.random.randint(1, 10, size=y_shape).astype(t2.dtype)
+            intrp = create_executor()
+            op_res = intrp.evaluate(z, { x: relay.const(x_data), y: relay.const(y_data) })
+            ref_res = ref(x_data, y_data)
+            np.testing.assert_allclose(op_res.asnumpy(), ref_res, rtol=0.01)
 
 def test_where():
     cond = relay.var("cond", relay.TensorType((3, 4), "float32"))