diff --git a/python/tvm/relay/op/tensor.py b/python/tvm/relay/op/tensor.py
index b425ff8f7537c90d05c9780f8b272bfd8da32919..c8c42c1a6ca40b2de339514e71352ac6a959e472 100644
--- a/python/tvm/relay/op/tensor.py
+++ b/python/tvm/relay/op/tensor.py
@@ -213,6 +213,24 @@ def greater_equal(lhs, rhs):
     return _make.greater_equal(lhs, rhs)
 
 
+def right_shift(lhs, rhs):
+    """Right shift with numpy-style broadcasting.
+
+    Parameters
+    ----------
+    lhs : relay.Expr
+        The left hand side input data
+    rhs : relay.Expr
+        The right hand side input data
+
+    Returns
+    -------
+    result : relay.Expr
+        The computed result.
+    """
+    return _make.right_shift(lhs, rhs)
+
+
 def concat(*args):
     """Concatenate the input tensors along the zero axis.
 
diff --git a/src/relay/op/tensor/binary.cc b/src/relay/op/tensor/binary.cc
new file mode 100644
index 0000000000000000000000000000000000000000..4c0fa657bac4c8d94ac9af3d9e8239d72b92afd2
--- /dev/null
+++ b/src/relay/op/tensor/binary.cc
@@ -0,0 +1,60 @@
+/*!
+ *  Copyright (c) 2018 by Contributors
+ * \file binary.cc
+ * \brief binary broadcast operators.
+ */
+#include <tvm/relay/expr.h>
+#include <tvm/relay/op.h>
+#include "../type_relations.h"
+
+namespace tvm {
+namespace relay {
+
+#define RELAY_REGISTER_BINARY_OP(OpName)                               \
+  TVM_REGISTER_API("relay.op._make." OpName)                           \
+  .set_body_typed<Expr(Expr, Expr)>([](Expr lhs, Expr rhs) {           \
+      static const Op& op = Op::Get(OpName);                           \
+      return CallNode::make(op, {lhs, rhs}, Attrs(), {});              \
+    });                                                                \
+  RELAY_REGISTER_OP(OpName)                                            \
+  .set_num_inputs(2)                                                   \
+  .add_argument("lhs", "Tensor", "The left hand side tensor.")         \
+  .add_argument("rhs", "Tensor", "The right hand side tensor.")        \
+  .add_type_rel("Broadcast", BroadcastRel)
+
+// Arithmetic and shift operators
+RELAY_REGISTER_BINARY_OP("add")
+.describe("Elementwise add with with broadcasting")
+.set_support_level(1);
+
+RELAY_REGISTER_BINARY_OP("subtract")
+.describe("Elementwise substract with broadcasting")
+.set_support_level(1);
+
+RELAY_REGISTER_BINARY_OP("right_shift")
+.describe("Elementwise right shift with broadcasting")
+.set_support_level(4);
+
+// Comparisons
+#define RELAY_REGISTER_CMP_OP(OpName, SupportLevel)                 \
+  TVM_REGISTER_API("relay.op._make." OpName)                        \
+  .set_body_typed<Expr(Expr, Expr)>([](Expr lhs, Expr rhs) {        \
+      static const Op& op = Op::Get(OpName);                        \
+    return CallNode::make(op, {lhs, rhs}, Attrs(), {});             \
+  });                                                               \
+  RELAY_REGISTER_OP(OpName)                                         \
+    .set_num_inputs(2)                                              \
+    .add_argument("lhs", "Tensor", "The left hand side tensor.")    \
+    .add_argument("rhs", "Tensor", "The right hand side tensor.")   \
+    .set_support_level(SupportLevel)                                \
+    .add_type_rel("BroadcastComp", BroadcastCompRel);
+
+RELAY_REGISTER_CMP_OP("equal", 4);
+RELAY_REGISTER_CMP_OP("not_equal", 4);
+RELAY_REGISTER_CMP_OP("less", 4);
+RELAY_REGISTER_CMP_OP("less_equal", 4);
+RELAY_REGISTER_CMP_OP("greater", 4);
+RELAY_REGISTER_CMP_OP("greater_equal", 4);
+
+}  // namespace relay
+}  // namespace tvm
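
Both macros delegate shape checking to the `Broadcast`/`BroadcastComp` relations declared in `../type_relations.h`; judging by the level-4 tests, the comparison variant differs only in that its result dtype becomes `uint1` (bool). A rough Python sketch of the numpy-style shape rule those relations implement, for illustration only (the real logic lives in the C++ type relations, not here):

    def broadcast_shape(s1, s2):
        # Right-align the shapes, padding the shorter one with leading 1s,
        # then take the larger extent wherever either side is 1.
        ndim = max(len(s1), len(s2))
        s1 = (1,) * (ndim - len(s1)) + tuple(s1)
        s2 = (1,) * (ndim - len(s2)) + tuple(s2)
        out = []
        for a, b in zip(s1, s2):
            if a == b or a == 1 or b == 1:
                out.append(max(a, b))
            else:
                raise ValueError("incompatible dimensions %d and %d" % (a, b))
        return tuple(out)

    assert broadcast_shape((10, 4), (5, 10, 1)) == (5, 10, 4)
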
diff --git a/src/relay/op/tensor/elemwise.cc b/src/relay/op/tensor/elemwise.cc
deleted file mode 100644
index 738f0f0f1f898c55d7fa242edf8d25ba67c3adf8..0000000000000000000000000000000000000000
--- a/src/relay/op/tensor/elemwise.cc
+++ /dev/null
@@ -1,144 +0,0 @@
-/*!
- *  Copyright (c) 2018 by Contributors
- * \file elemwise.cc
- * \brief Elementwise operators.
- */
-#include <tvm/relay/expr.h>
-#include <tvm/relay/op.h>
-#include "../type_relations.h"
-
-namespace tvm {
-namespace relay {
-
-// Quick helper macro
-// - Expose a positional make function to construct the node.
-// - Register op to the registry.
-//
-// We make the decision to always only expose positional argument.
-// We will do rewrapping in the frontend to support language
-// sugars such as keyword arguments and default value.
-//
-#define RELAY_REGISTER_UNARY_OP(OpName)               \
-  TVM_REGISTER_API("relay.op._make." OpName)          \
-  .set_body_typed<Expr(Expr)>([](Expr data) {         \
-      static const Op& op = Op::Get(OpName);          \
-    return CallNode::make(op, {data}, Attrs(), {});   \
-    });                                               \
-  RELAY_REGISTER_OP(OpName)                           \
-  .set_num_inputs(1)                                  \
-  .add_argument("data", "Tensor", "The input tensor.")
-
-
-RELAY_REGISTER_UNARY_OP("log")
-.describe(R"code(Returns the log input array, computed element-wise.
-
-.. math::
-   log(x)
-
-)code" TVM_ADD_FILELINE)
-.set_support_level(1)
-.add_type_rel("Identity", IdentityRel);
-
-// data : Tensor[shape, dtype]
-// result: Tensor[shape, dtype]
-
-
-RELAY_REGISTER_UNARY_OP("exp")
-.describe(R"code(Returns the exp input array, computed element-wise.
-
-.. math::
-   \exp(x)
-
-)code" TVM_ADD_FILELINE)
-.set_support_level(1)
-.add_type_rel("Identity", IdentityRel);
-
-
-RELAY_REGISTER_UNARY_OP("sqrt")
-.describe(R"code(Returns the sqrt input array, computed element-wise.
-
-.. math::
-   sqrt(x)
-
-)code" TVM_ADD_FILELINE)
-.set_support_level(1)
-.add_type_rel("Identity", IdentityRel);
-
-// Addition
-TVM_REGISTER_API("relay.op._make.add")
-  .set_body_typed<Expr(Expr, Expr)>([](Expr lhs, Expr rhs) {
-      static const Op& op = Op::Get("add");
-    return CallNode::make(op, {lhs, rhs}, Attrs(), {});
-  });
-
-RELAY_REGISTER_OP("add")
-  .set_num_inputs(2)
-  .add_argument("lhs", "Tensor", "The left hand side tensor.")
-  .add_argument("rhs", "Tensor", "The right hand side tensor.")
-  .set_support_level(1)
-  .add_type_rel("Broadcast", BroadcastRel);
-
-  // def broadcast(s1, s2):
-  // ...
-  //
-  // input1: Tensor[dtype, s1]
-  // input2: Tensor[dtype, s2]
-  // output: Tensor[dtype, broadcast(s1, s2)]
-
-// Addition
-TVM_REGISTER_API("relay.op._make.subtract")
-  .set_body_typed<Expr(Expr, Expr)>([](Expr lhs, Expr rhs) {
-      static const Op& op = Op::Get("subtract");
-    return CallNode::make(op, {lhs, rhs}, Attrs(), {});
-  });
-
-RELAY_REGISTER_OP("subtract")
-  .set_num_inputs(2)
-  .add_argument("lhs", "Tensor", "The left hand side tensor.")
-  .add_argument("rhs", "Tensor", "The right hand side tensor.")
-  .set_support_level(1)
-  .add_type_rel("Broadcast", BroadcastRel);
-
-  // def broadcast(s1, s2):
-  // ...
-  //
-  // input1: Tensor[dtype, s1]
-  // input2: Tensor[dtype, s2]
-  // output: Tensor[dtype, broadcast(s1, s2)]
-
-// Comparisons
-#define RELAY_REGISTER_CMP_OP(OpName, SupportLevel)                 \
-  TVM_REGISTER_API("relay.op._make." OpName)                        \
-  .set_body_typed<Expr(Expr, Expr)>([](Expr lhs, Expr rhs) {        \
-      static const Op& op = Op::Get(OpName);                        \
-    return CallNode::make(op, {lhs, rhs}, Attrs(), {});             \
-  });                                                               \
-  RELAY_REGISTER_OP(OpName)                                         \
-    .set_num_inputs(2)                                              \
-    .add_argument("lhs", "Tensor", "The left hand side tensor.")    \
-    .add_argument("rhs", "Tensor", "The right hand side tensor.")   \
-    .set_support_level(SupportLevel)                                \
-    .add_type_rel("BroadcastComp", BroadcastCompRel);
-
-RELAY_REGISTER_CMP_OP("equal", 4);
-RELAY_REGISTER_CMP_OP("not_equal", 4);
-RELAY_REGISTER_CMP_OP("less", 4);
-RELAY_REGISTER_CMP_OP("less_equal", 4);
-RELAY_REGISTER_CMP_OP("greater", 4);
-RELAY_REGISTER_CMP_OP("greater_equal", 4);
-
-// Concat
-TVM_REGISTER_API("relay.op._make.concat")
-  .set_body_typed<Expr(Expr)>([](Expr tuple) {
-      static const Op& op = Op::Get("concat");
-    return CallNode::make(op, { tuple }, Attrs(), {});
-  });
-
-RELAY_REGISTER_OP("concat")
-  .set_num_inputs(1)
-  .add_argument("tuple", "Tuple", "The tupled tensor arguments.")
-  .set_support_level(1)
-  .add_type_rel("Concat", ConcatRel);
-
-}  // namespace relay
-}  // namespace tvm
diff --git a/src/relay/op/tensor/unary.cc b/src/relay/op/tensor/unary.cc
new file mode 100644
index 0000000000000000000000000000000000000000..798d4aa791adcf59937112fe995aedd398e41ee1
--- /dev/null
+++ b/src/relay/op/tensor/unary.cc
@@ -0,0 +1,82 @@
+/*!
+ *  Copyright (c) 2018 by Contributors
+ * \file unary.cc
+ * \brief Unary operators.
+ */
+#include <tvm/relay/expr.h>
+#include <tvm/relay/op.h>
+#include "../type_relations.h"
+
+namespace tvm {
+namespace relay {
+
+// Quick helper macro
+// - Expose a positional make function to construct the node.
+// - Register op to the registry.
+//
+// We make the decision to only expose positional arguments.
+// We will do the rewrapping in the frontend to support language
+// sugar such as keyword arguments and default values.
+//
+#define RELAY_REGISTER_UNARY_OP(OpName)               \
+  TVM_REGISTER_API("relay.op._make." OpName)          \
+  .set_body_typed<Expr(Expr)>([](Expr data) {         \
+      static const Op& op = Op::Get(OpName);          \
+    return CallNode::make(op, {data}, Attrs(), {});   \
+    });                                               \
+  RELAY_REGISTER_OP(OpName)                           \
+  .set_num_inputs(1)                                  \
+  .add_argument("data", "Tensor", "The input tensor.")
+
+
+RELAY_REGISTER_UNARY_OP("log")
+.describe(R"code(Returns the log input array, computed element-wise.
+
+.. math::
+   log(x)
+
+)code" TVM_ADD_FILELINE)
+.set_support_level(1)
+.add_type_rel("Identity", IdentityRel);
+
+// data : Tensor[shape, dtype]
+// result: Tensor[shape, dtype]
+
+
+RELAY_REGISTER_UNARY_OP("exp")
+.describe(R"code(Returns the exp input array, computed element-wise.
+
+.. math::
+   \exp(x)
+
+)code" TVM_ADD_FILELINE)
+.set_support_level(1)
+.add_type_rel("Identity", IdentityRel);
+
+
+RELAY_REGISTER_UNARY_OP("sqrt")
+.describe(R"code(Returns the sqrt input array, computed element-wise.
+
+.. math::
+   sqrt(x)
+
+)code" TVM_ADD_FILELINE)
+.set_support_level(1)
+.add_type_rel("Identity", IdentityRel);
+
+
+// Concat
+TVM_REGISTER_API("relay.op._make.concat")
+  .set_body_typed<Expr(Expr)>([](Expr tuple) {
+      static const Op& op = Op::Get("concat");
+    return CallNode::make(op, { tuple }, Attrs(), {});
+  });
+
+RELAY_REGISTER_OP("concat")
+.set_num_inputs(1)
+.add_argument("tuple", "Tuple", "The tupled tensor arguments.")
+.set_support_level(1)
+.add_type_rel("Concat", ConcatRel);
+
+}  // namespace relay
+}  // namespace tvm
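
The macro comment above notes that only positional `_make` functions are exposed from C++ and that any keyword-argument sugar is added on the Python side. The `right_shift` wrapper added to `python/tvm/relay/op/tensor.py` in this patch is exactly that kind of rewrap; a unary op would follow the same shape, sketched here purely for illustration (this is not a file touched by the patch):

    # Hypothetical frontend wrapper mirroring python/tvm/relay/op/tensor.py:
    # the C++ registration above only exposes the positional packed function
    # "relay.op._make.log"; defaults and keyword handling would live here.
    from . import _make

    def log(data):
        """Compute the elementwise log of the input."""
        return _make.log(data)
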
diff --git a/tests/python/relay/test_op_level4.py b/tests/python/relay/test_op_level4.py
index 726bc7623a2ac531ff16ee53e798fc3a939da228..5009994871f7af3c8dcfff731c04b61bab024011 100644
--- a/tests/python/relay/test_op_level4.py
+++ b/tests/python/relay/test_op_level4.py
@@ -20,5 +20,19 @@ def test_cmp_type():
         assert ftype.ret_type == relay.TensorType((5, 10, 4), "uint1")
 
 
+def test_binary_broadcast():
+    for op in [relay.right_shift]:
+        ib = relay.ir_builder.IRBuilder()
+        x = ib.param("x", relay.TensorType((10, 4), "int32"))
+        y = ib.param("y", relay.TensorType((5, 10, 1), "int32"))
+        with ib.function(x, y) as func:
+            ib.ret(op(x.var, y.var))
+        ib.ret(func)
+        func = relay.ir_pass.infer_type(ib.env, func.to_func())
+        ftype = func.checked_type()
+        assert ftype.ret_type == relay.TensorType((5, 10, 4), "int32")
+
+
 if __name__ == "__main__":
     test_cmp_type()
+    test_binary_broadcast()