diff --git a/Jenkinsfile b/Jenkinsfile
index b667359f0f2b1cafba5526c951ab3190386e97a1..7fb8e6b8b5d1f73bc7417e23d662b27dc38bafec 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -87,7 +87,6 @@ stage('Build') {
            cp make/config.mk .
            echo USE_CUDNN=1 >> config.mk
            echo USE_CUDA=1 >> config.mk
-           echo USE_OPENCL=1 >> config.mk
            echo USE_OPENGL=1 >> config.mk
            echo LLVM_CONFIG=llvm-config-4.0 >> config.mk
            echo USE_RPC=1 >> config.mk
@@ -105,6 +104,7 @@ stage('Build') {
         sh "mv lib/libtvm.so lib/libtvm_llvm60.so"
         pack_lib('gpu', tvm_multilib)
         sh """
+           echo USE_OPENCL=1 >> config.mk
            echo USE_ROCM=1 >> config.mk
            echo ROCM_PATH=/opt/rocm >> config.mk
            echo USE_VULKAN=1 >> config.mk
@@ -152,31 +152,6 @@ stage('Build') {
         pack_lib('i386', tvm_multilib)
       }
     }
-  },
-  'web': {
-    node('emcc') {
-      ws('workspace/tvm/build-weblib') {
-        init_git()
-        sh """
-           cp make/config.mk .
-           echo USE_CUDA=0 >> config.mk
-           echo USE_OPENCL=0 >> config.mk
-           echo LLVM_CONFIG=llvm-config >> config.mk
-           echo USE_RPC=0 >> config.mk
-           """
-        sh "${docker_run} emscripten echo testing javascript..."
-        timeout(time: max_time, unit: 'MINUTES') {
-          try {
-            sh "${docker_run} emscripten ./tests/scripts/task_web_build.sh"
-          } catch (exc) {
-            echo 'Incremental compilation failed. Fall back to build from scratch'
-            sh "${docker_run} emscripten make clean"
-            sh "${docker_run} emscripten ./tests/scripts/task_web_build.sh"
-         }
-        }
-        pack_lib('weblib', tvm_lib)
-      }
-    }
   }
 }
 
@@ -256,18 +231,6 @@ stage('Integration Test') {
       }
     }
   },
-  'web': {
-    node('emcc') {
-      ws('workspace/tvm/it-weblib') {
-        init_git()
-        unpack_lib('weblib', tvm_lib)
-        sh "${docker_run} emscripten echo testing javascript..."
-        timeout(time: max_time, unit: 'MINUTES') {
-          sh "${docker_run} emscripten ./tests/scripts/task_web_test.sh"
-        }
-      }
-    }
-  },
   'docs': {
     node('GPU' && 'linux') {
       ws('workspace/tvm/docs-python-gpu') {
diff --git a/python/tvm/_ffi/function.py b/python/tvm/_ffi/function.py
index e0f85be6f1a924684d47486124ba68cf4569d77d..cfda2a35f9b98d543c55ec2b86927f124bb77efb 100644
--- a/python/tvm/_ffi/function.py
+++ b/python/tvm/_ffi/function.py
@@ -181,10 +181,10 @@ def register_func(func_name, f=None, override=False):
             myf = convert_to_tvm_func(myf)
         check_call(_LIB.TVMFuncRegisterGlobal(
             c_str(func_name), myf.handle, ioverride))
+        return myf
     if f:
-        register(f)
-    else:
-        return register
+        return register(f)
+    return register
 
 
 def get_global_func(name, allow_missing=False):
diff --git a/python/tvm/api.py b/python/tvm/api.py
index 66c154bc9f0012bcd76297a41d7a3205d6f24a12..b827cce72896b31e7cf0cd677549d8ba115a78b2 100644
--- a/python/tvm/api.py
+++ b/python/tvm/api.py
@@ -652,6 +652,7 @@ def comm_reducer(fcombine, fidentity, name="reduce"):
                         for i in range(size))
         return outputs[0] if size == 1 else outputs
 
+    # pylint: disable=keyword-arg-before-vararg
     def reducer(expr, axis, where=None, *args):
         if isinstance(axis, (_schedule.IterVar, list, tuple)):
             assert not args
diff --git a/tests/ci_build/Dockerfile.emscripten b/tests/ci_build/Dockerfile.emscripten
index 59bf02ea7d2ccfcb9c996a651c3dacc96b709d04..b4d5a63c52efff7480e4404e68d5e3e79ecd34f7 100644
--- a/tests/ci_build/Dockerfile.emscripten
+++ b/tests/ci_build/Dockerfile.emscripten
@@ -15,4 +15,8 @@ RUN bash /install/ubuntu_install_emscripten.sh
 COPY install/ubuntu_install_python_package.sh /install/ubuntu_install_python_package.sh
 RUN bash /install/ubuntu_install_python_package.sh
 
-RUN cp /root/.emscripten /emsdk-portable/
\ No newline at end of file
+RUN chmod a+rwx -R /emsdk-portable
+RUN cp -r /emsdk-portable  /emsdk-portable-backup
+RUN mv /emsdk-portable  /emsdk-portable-x
+RUN mv /emsdk-portable-backup /emsdk-portable
+RUN cp /root/.emscripten /emsdk-portable/
diff --git a/tests/ci_build/Dockerfile.gpu b/tests/ci_build/Dockerfile.gpu
index e49e498b8d406a26031a47e46f317cd8f4a6b334..4b461ebf19c6882d1ee3e15c48e780106d494b57 100644
--- a/tests/ci_build/Dockerfile.gpu
+++ b/tests/ci_build/Dockerfile.gpu
@@ -1,7 +1,6 @@
 FROM nvidia/cuda:8.0-cudnn7-devel
 
 # Base scripts
-RUN apt-get update --fix-missing
 
 COPY install/ubuntu_install_core.sh /install/ubuntu_install_core.sh
 RUN bash /install/ubuntu_install_core.sh
@@ -12,9 +11,6 @@ RUN bash /install/ubuntu_install_python.sh
 COPY install/ubuntu_install_llvm.sh /install/ubuntu_install_llvm.sh
 RUN bash /install/ubuntu_install_llvm.sh
 
-COPY install/ubuntu_install_opencl.sh /install/ubuntu_install_opencl.sh
-RUN bash /install/ubuntu_install_opencl.sh
-
 COPY install/ubuntu_install_iverilog.sh /install/ubuntu_install_iverilog.sh
 RUN bash /install/ubuntu_install_iverilog.sh
 
@@ -40,8 +36,11 @@ RUN bash /install/ubuntu_install_rocm.sh
 COPY install/ubuntu_install_opengl.sh /install/ubuntu_install_opengl.sh
 RUN bash /install/ubuntu_install_opengl.sh
 
+COPY install/ubuntu_install_opencl.sh /install/ubuntu_install_opencl.sh
+RUN bash /install/ubuntu_install_opencl.sh
+
 # Enable doxygen for c++ doc build
-RUN apt-get install -y doxygen graphviz
+RUN apt-get update && apt-get install -y doxygen graphviz
 
 # Install vulkan
 COPY install/ubuntu_install_vulkan.sh /install/ubuntu_install_vulkan.sh
diff --git a/tests/ci_build/install/ubuntu_install_core.sh b/tests/ci_build/install/ubuntu_install_core.sh
index 9823ae0788ac6c1fe18513e1a551fe7a4f722653..efc69c946b978827a68beb2a642dcc5546db5a47 100644
--- a/tests/ci_build/install/ubuntu_install_core.sh
+++ b/tests/ci_build/install/ubuntu_install_core.sh
@@ -1,5 +1,5 @@
 # install libraries for building c++ core on ubuntu
-apt-get install -y --no-install-recommends --force-yes \
+apt-get update && apt-get install -y --no-install-recommends --force-yes \
         git make libgtest-dev cmake wget unzip libtinfo-dev libz-dev\
         libcurl4-openssl-dev libopenblas-dev g++ sudo
 
diff --git a/tests/ci_build/install/ubuntu_install_llvm.sh b/tests/ci_build/install/ubuntu_install_llvm.sh
index e5b28b911f61eb691b6b039dca7996f726ee3964..ba0afcd18cc9d6a88749433f2b534feb9411c911 100644
--- a/tests/ci_build/install/ubuntu_install_llvm.sh
+++ b/tests/ci_build/install/ubuntu_install_llvm.sh
@@ -8,6 +8,11 @@ echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main\
 echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-5.0 main\
      >> /etc/apt/sources.list.d/llvm.list
 
+echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main\
+     >> /etc/apt/sources.list.d/llvm.list
+echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial-6.0 main\
+     >> /etc/apt/sources.list.d/llvm.list
+
 echo deb http://apt.llvm.org/xenial/ llvm-toolchain-xenial main\
      >> /etc/apt/sources.list.d/llvm.list
 echo deb-src http://apt.llvm.org/xenial/ llvm-toolchain-xenial main\
diff --git a/tests/ci_build/install/ubuntu_install_opencl.sh b/tests/ci_build/install/ubuntu_install_opencl.sh
index 636236539a984b0afa82f06441442de1e324faa9..ca4d1d04fd5c266abf8838440980896643b13643 100644
--- a/tests/ci_build/install/ubuntu_install_opencl.sh
+++ b/tests/ci_build/install/ubuntu_install_opencl.sh
@@ -1,8 +1,8 @@
 # Install OpenCL runtime in nvidia docker.
-apt-get install -y --no-install-recommends --force-yes \
-        ocl-icd-libopencl1 \
+apt-get update && apt-get install -y --no-install-recommends --force-yes \
+        ocl-icd-opencl-dev \
         clinfo && \
-        rm -rf /var/lib/apt/lists/*
+    rm -rf /var/lib/apt/lists/*
 
 mkdir -p /etc/OpenCL/vendors && \
     echo "libnvidia-opencl.so.1" > /etc/OpenCL/vendors/nvidia.icd
diff --git a/tests/ci_build/install/ubuntu_install_sphinx.sh b/tests/ci_build/install/ubuntu_install_sphinx.sh
index 767643f104886ba9a941efa054d7612c35332b12..30d4596aabf2923490eb222ad6edcbd1991fab28 100644
--- a/tests/ci_build/install/ubuntu_install_sphinx.sh
+++ b/tests/ci_build/install/ubuntu_install_sphinx.sh
@@ -1 +1 @@
-pip install sphinx==1.6.2 sphinx-gallery sphinx_rtd_theme matplotlib Image commonmark>=0.7.3 docutils>=0.11
+pip install sphinx sphinx-gallery sphinx_rtd_theme matplotlib Image commonmark>=0.7.3 docutils>=0.11
diff --git a/topi/python/topi/cuda/conv2d_transpose_nchw.py b/topi/python/topi/cuda/conv2d_transpose_nchw.py
index edd255a8fac036ca8da4fd8d48124fbe83869e29..0d439bfdfdeaa4ce2f4d7986d7d77adba410abf9 100644
--- a/topi/python/topi/cuda/conv2d_transpose_nchw.py
+++ b/topi/python/topi/cuda/conv2d_transpose_nchw.py
@@ -74,6 +74,7 @@ def schedule_conv2d_transpose_small_batch(outs):
             conv2d_56_64_64(s, Filter, temp_S, Filter_S, Out, Out_L)
 
     def traverse(OP):
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_injective(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/cuda/dense.py b/topi/python/topi/cuda/dense.py
index 6207c14220d05fd48e821efce9e9993c42f773ba..7c62fab743f5b8fcdfea5cba2a070614067d7261 100644
--- a/topi/python/topi/cuda/dense.py
+++ b/topi/python/topi/cuda/dense.py
@@ -87,6 +87,7 @@ def schedule_dense(outs):
         s[Out].set_store_predicate(thread_x.var.equal(0))
 
     def traverse(OP):
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/cuda/depthwise_conv2d.py b/topi/python/topi/cuda/depthwise_conv2d.py
index 851a00db0a481acd54cd9f9cdbef406771b8d564..3045774296d02d93de9b68b2cb40d88e81f3300b 100644
--- a/topi/python/topi/cuda/depthwise_conv2d.py
+++ b/topi/python/topi/cuda/depthwise_conv2d.py
@@ -102,6 +102,7 @@ def schedule_depthwise_conv2d_nchw(outs):
         s[FS].bind(tx, thread_x)
 
     def traverse(OP):
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
@@ -178,6 +179,7 @@ def schedule_depthwise_conv2d_nhwc(outs):
         s[FS].bind(fused, thread_x)
 
     def traverse(OP):
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/cuda/pooling.py b/topi/python/topi/cuda/pooling.py
index 4ed5ae66c19b92e6cd5fb7d7d3a9a194a91d0f64..95ef3ce67c5cfe3d0153b28ca7e381f9c1940f96 100644
--- a/topi/python/topi/cuda/pooling.py
+++ b/topi/python/topi/cuda/pooling.py
@@ -47,6 +47,7 @@ def schedule_global_pool(outs):
             s[Pool].compute_at(s[Out], tx)
 
     def traverse(OP):
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
@@ -101,6 +102,7 @@ def schedule_pool(outs):
             s[Pool].compute_at(s[Out], tx)
 
     def traverse(OP):
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/cuda/reduction.py b/topi/python/topi/cuda/reduction.py
index 932f2aae30988d071f5448db84d0ce46584e99b9..601d11ab9a2ff0f7a51cb9c2a3e3fa6ad9de10da 100644
--- a/topi/python/topi/cuda/reduction.py
+++ b/topi/python/topi/cuda/reduction.py
@@ -87,6 +87,7 @@ def schedule_reduce(outs):
     sch = tvm.create_schedule([x.op for x in outs])
 
     def traverse_before_reduce(operator):
+        """Internal traverse function"""
         if isinstance(operator, tvm.tensor.PlaceholderOp):
             return
         elif tag.is_injective(operator.tag):
@@ -97,6 +98,7 @@ def schedule_reduce(outs):
             raise RuntimeError("Unsupported operator: %s" % operator.tag)
 
     def traverse_after_reduce(operator):
+        """Internal traverse function"""
         if tag.is_broadcast(operator.tag):
             raise RuntimeError("Not yet support ewise after reduce")
         elif operator.tag == 'comm_reduce':
diff --git a/topi/python/topi/mali/dense.py b/topi/python/topi/mali/dense.py
index d3edeafed3b3fdc3b1ee96bd46b171220a46e859..ff88ce51866fafc86db5d8501f741d5ee643adec 100644
--- a/topi/python/topi/mali/dense.py
+++ b/topi/python/topi/mali/dense.py
@@ -82,6 +82,7 @@ def schedule_dense(outs):
 #        print(tvm.lower(s, [data, weight, bias, outs[0]], simple_mode=True))
 
     def traverse(OP):
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/mali/depthwise_conv2d.py b/topi/python/topi/mali/depthwise_conv2d.py
index 46ce7f747def93de3deec8601a6e416026ca2d15..428140550528706f18f1740736ccfc8e6d8fbbad 100644
--- a/topi/python/topi/mali/depthwise_conv2d.py
+++ b/topi/python/topi/mali/depthwise_conv2d.py
@@ -87,6 +87,7 @@ def schedule_depthwise_conv2d_nchw(outs):
             s[conv].compute_at(s[output], ji)
 
     def traverse(op):
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(op.tag):
             if op not in s.outputs:
diff --git a/topi/python/topi/nn/bnn.py b/topi/python/topi/nn/bnn.py
index 39b9d2a15a1b75c96f9a830d19b91d49feb56cd8..591a082e7d30628891ec8a6417e2d68af2ed3d51 100644
--- a/topi/python/topi/nn/bnn.py
+++ b/topi/python/topi/nn/bnn.py
@@ -43,6 +43,7 @@ def binarize_pack(data, axis=None, name="PackedInput"):
             if j == 31:
                 return packed
             packed = packed << 1
+        raise RuntimeError("not reach")
 
     return tvm.compute(oshape, _binarize_pack, name=name, tag='binarize_pack')
 
diff --git a/topi/python/topi/opengl/conv2d_nchw.py b/topi/python/topi/opengl/conv2d_nchw.py
index c633d8a21e6ea1f8a7e2da1b628bb9a53c8e248d..7e8b7275f75dbf95a55ce07de85ee9d403fb2739 100644
--- a/topi/python/topi/opengl/conv2d_nchw.py
+++ b/topi/python/topi/opengl/conv2d_nchw.py
@@ -31,6 +31,7 @@ def schedule_conv2d_nchw(outs):
         s[data].opengl()
 
     def traverse(OP):
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/opengl/dense.py b/topi/python/topi/opengl/dense.py
index e7cf008ae240015f1eddfd88496e7af8bc1a9c38..e4d327afa4d620d6132247844ce8e47c9ce8184e 100644
--- a/topi/python/topi/opengl/dense.py
+++ b/topi/python/topi/opengl/dense.py
@@ -31,6 +31,7 @@ def schedule_dense(outs):
         s[Out].opengl()
 
     def traverse(OP):
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/opengl/pooling.py b/topi/python/topi/opengl/pooling.py
index 5c26c56bb1ac235f269770071041935e8737749b..dc4f47609f92374078ea38441bc1ae9123daac17 100644
--- a/topi/python/topi/opengl/pooling.py
+++ b/topi/python/topi/opengl/pooling.py
@@ -30,6 +30,7 @@ def schedule_global_pool(outs):
         s[Out].opengl()
 
     def traverse(OP):
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
@@ -75,6 +76,7 @@ def schedule_pool(outs):
         s[Out].opengl()
 
     def traverse(OP):
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/topi/python/topi/rasp/depthwise_conv2d.py b/topi/python/topi/rasp/depthwise_conv2d.py
index a6fd691f843f9f1622e73208e8240f806fa86610..b7cb6570987a7e2b5079615e854e17b788f36940 100644
--- a/topi/python/topi/rasp/depthwise_conv2d.py
+++ b/topi/python/topi/rasp/depthwise_conv2d.py
@@ -164,6 +164,7 @@ def schedule_depthwise_conv2d(outs):
     s = tvm.create_schedule([x.op for x in outs])
 
     def traverse(op):
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(op.tag):
             if op not in s.outputs:
diff --git a/topi/python/topi/x86/binary_dense.py b/topi/python/topi/x86/binary_dense.py
index 11fccba7b64461ddc996072049cfa5cbe8e9d4c8..8b28dd728842814465caf0919ead3041f3487d9e 100644
--- a/topi/python/topi/x86/binary_dense.py
+++ b/topi/python/topi/x86/binary_dense.py
@@ -35,6 +35,7 @@ def schedule_binary_dense(outs):
         s[Out].vectorize(xi)
 
     def traverse(OP):
+        """Internal traverse function"""
         # inline all one-to-one-mapping operators except the last stage (output)
         if tag.is_broadcast(OP.tag):
             if OP not in s.outputs:
diff --git a/tutorials/deployment/cross_compilation_and_rpc.py b/tutorials/deployment/cross_compilation_and_rpc.py
index ccaf9e79e3de3bcb45e88c5dd6287db93cda85df..f06bbfca64077f5b3841dead5909a65c9a4cfa25 100644
--- a/tutorials/deployment/cross_compilation_and_rpc.py
+++ b/tutorials/deployment/cross_compilation_and_rpc.py
@@ -108,8 +108,6 @@ import tvm
 import numpy as np
 from tvm.contrib import rpc, util
 
-server = rpc.Server(host='0.0.0.0', port=9090, use_popen=True)
-
 ######################################################################
 # Declare and Cross Compile Kernel on Local Machine
 # -------------------------------------------------
@@ -241,47 +239,52 @@ print('%g secs/op' % cost)
 #    But here we set 'llvm' to enable this tutorial to run locally.
 #
 #    Also we need to build the runtime with the flag `USE_OPENCL=1`.
-
 # build kernel (different from cpu, we need bind axis for OpenCL)
-s = tvm.create_schedule(B.op)
-xo, xi = s[B].split(B.op.axis[0], factor=32)
-s[B].bind(xo, tvm.thread_axis("blockIdx.x"))
-s[B].bind(xi, tvm.thread_axis("threadIdx.x"))
-f = tvm.build(s, [A, B], "opencl", target_host="llvm", name="myadd")
+#
+# The following function shows how we can deploy CL
+def deploy_cl():
+    s = tvm.create_schedule(B.op)
+    xo, xi = s[B].split(B.op.axis[0], factor=32)
+    s[B].bind(xo, tvm.thread_axis("blockIdx.x"))
+    s[B].bind(xi, tvm.thread_axis("threadIdx.x"))
+    f = tvm.build(s, [A, B], "opencl", target_host="llvm", name="myadd")
 
-# save files
-path_o = temp.relpath("myadd.o")
-path_cl = temp.relpath("myadd.cl")
-path_json = temp.relpath("myadd.tvm_meta.json")
-f.save(path_o)
-f.imported_modules[0].save(path_cl)
+    # save files
+    path_o = temp.relpath("myadd.o")
+    path_cl = temp.relpath("myadd.cl")
+    path_json = temp.relpath("myadd.tvm_meta.json")
+    f.save(path_o)
+    f.imported_modules[0].save(path_cl)
 
-# upload files
-remote.upload(path_o)
-remote.upload(path_cl)
-remote.upload(path_json)
+    # upload files
+    remote.upload(path_o)
+    remote.upload(path_cl)
+    remote.upload(path_json)
 
-# load files on remote device
-fhost = remote.load_module("myadd.o")
-fdev = remote.load_module("myadd.cl")
-fhost.import_module(fdev)
+    # load files on remote device
+    fhost = remote.load_module("myadd.o")
+    fdev = remote.load_module("myadd.cl")
+    fhost.import_module(fdev)
+
+    # run
+    ctx = remote.cl(0)
+    a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx)
+    b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), ctx)
+    fhost(a, b)
+    np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
 
-# run
-ctx = remote.cl(0)
-a = tvm.nd.array(np.random.uniform(size=1024).astype(A.dtype), ctx)
-b = tvm.nd.array(np.zeros(1024, dtype=A.dtype), ctx)
-fhost(a, b)
-np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
 
 #####################################################################
 # Instead of uploading files separately, there is a more convinient way.
 # You can export libraray as a tar ball.
-path_tar = temp.relpath("myadd.tar")
-f.export_library(path_tar)
-remote.upload(path_tar)
-fhost = remote.load_module("myadd.tar")
-fhost(a, b)
-np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
+# The following function shows how we can deploy by tar ball
+def deploy_cl_by_tar():
+    path_tar = temp.relpath("myadd.tar")
+    f.export_library(path_tar)
+    remote.upload(path_tar)
+    fhost = remote.load_module("myadd.tar")
+    fhost(a, b)
+    np.testing.assert_equal(b.asnumpy(), a.asnumpy() + 1)
 
 # terminate the server after experiment
 server.terminate()