diff --git a/include/tvm/ir_pass.h b/include/tvm/ir_pass.h index ab42cfc9625fc87b4076573df64369eed8851990..9403a2e6151bcb6f7c635b4a153a3c655bd690cb 100644 --- a/include/tvm/ir_pass.h +++ b/include/tvm/ir_pass.h @@ -217,7 +217,7 @@ Stmt NarrowChannelAccess(Stmt stmt); * \param auto_max_step The maximum step before stop attach automatic unroll * \param auto_max_depth The maximum depth before stop attach automatic unroll * \param auto_max_extent The maximum extent of the loop we can unroll, - * this is an legacy option that donot take the loop total steps into account. + * this is a legacy option that does not take the loop total steps into account. * \param explicit_unroll Whether explicitly unroll the loop, or leave unroll annotation to codegen. * \return Transformed stmt. */ diff --git a/python/tvm/_ffi/runtime_ctypes.py b/python/tvm/_ffi/runtime_ctypes.py index 4f94e0e62d0a7be26fd240e6609370ad80f9e065..2aced1aef7d29b934a5a4ee14fbd3a31482cb743 100644 --- a/python/tvm/_ffi/runtime_ctypes.py +++ b/python/tvm/_ffi/runtime_ctypes.py @@ -67,7 +67,7 @@ class TVMType(ctypes.Structure): bits = 64 head = "" else: - raise ValueError("Donot know how to handle type %s" % type_str) + raise ValueError("Do not know how to handle type %s" % type_str) bits = int(head) if head else bits self.bits = bits diff --git a/python/tvm/schedule.py b/python/tvm/schedule.py index 594c2f2dc8bdc6bef0a94edc1f53be4f332641c8..6c261a45345795d8ff2d8b27fddc8e47aa66f636 100644 --- a/python/tvm/schedule.py +++ b/python/tvm/schedule.py @@ -362,7 +362,7 @@ class Stage(NodeBase): """ if nparts is not None: if factor is not None: - raise ValueError("Donot need to provide both outer and nparts") + raise ValueError("Do not need to provide both outer and nparts") outer, inner = _api_internal._StageSplitByNParts(self, parent, nparts) else: if factor is None: diff --git a/python/tvm/tensor_intrin.py b/python/tvm/tensor_intrin.py index 
62f8c8897d103d0fa3e27dbc8e7069cb18bf765f..193124b2f9461d5b26dba734fc9776471befc8c4 100644 --- a/python/tvm/tensor_intrin.py +++ b/python/tvm/tensor_intrin.py @@ -72,7 +72,7 @@ def decl_tensor_intrin(op, binds_list = [] for t in inputs: if not isinstance(t.op, _tensor.PlaceholderOp): - raise ValueError("Donot yet support composition op") + raise ValueError("Do not yet support composition op") cfg = current_build_config() for t in tensors: diff --git a/src/codegen/codegen_c.cc b/src/codegen/codegen_c.cc index c3b0d278c7ac1f1388a720f558a1471b94a02e54..d902437dd99023132490a8e6ee5639af07a9c854 100644 --- a/src/codegen/codegen_c.cc +++ b/src/codegen/codegen_c.cc @@ -207,7 +207,7 @@ std::string CodeGenC::GetStructRef( } else if (t.is_int()) { os << "v_int64"; } else { - LOG(FATAL) << "donot know how to handle type" << t; + LOG(FATAL) << "Do not know how to handle type" << t; } os << ")"; return os.str(); diff --git a/src/codegen/verilog/codegen_verilog.cc b/src/codegen/verilog/codegen_verilog.cc index d7e149257fdba4db329af6ba8d90586f6c687259..af3d2fcfe467a38394a945ef5f6de096e13c05e8 100644 --- a/src/codegen/verilog/codegen_verilog.cc +++ b/src/codegen/verilog/codegen_verilog.cc @@ -213,11 +213,11 @@ VerilogValue CodeGenVerilog::VisitExpr_(const UIntImm *op) { return IntConst(op, this); } VerilogValue CodeGenVerilog::VisitExpr_(const FloatImm *op) { - LOG(FATAL) << "Donot support float constant in Verilog"; + LOG(FATAL) << "Do not support float constant in Verilog"; return VerilogValue(); } VerilogValue CodeGenVerilog::VisitExpr_(const StringImm *op) { - LOG(FATAL) << "Donot support string constant in Verilog"; + LOG(FATAL) << "Do not support string constant in Verilog"; return VerilogValue(); } diff --git a/src/op/tensorize.cc b/src/op/tensorize.cc index 6423c4e942e4db4c42bb2deadb253d49f842e2b0..6daaedd16de15a06418436085118edf097b38ac6 100644 --- a/src/op/tensorize.cc +++ b/src/op/tensorize.cc @@ -52,10 +52,10 @@ size_t InferTensorizeRegion( const IterVarAttr& attr = 
(*iit).second; if (!found_point) { CHECK(!attr->bind_thread.defined()) - << "Donot allow thread in tensorize scope"; + << "Do not allow thread in tensorize scope"; } if (attr->iter_type == kTensorized) { - CHECK(!found_point) << "Donot allow two tensorized point"; + CHECK(!found_point) << "Do not allow two tensorized point"; found_point = true; loc_scope = i - 1; } diff --git a/src/runtime/pack_args.h b/src/runtime/pack_args.h index 0a00e79f07dfd2337e0a4c16d5edd4295c4e674e..5170e5fd9e9ad6d50be5cef643fa3303409db12e 100644 --- a/src/runtime/pack_args.h +++ b/src/runtime/pack_args.h @@ -168,7 +168,7 @@ inline PackedFunc PackFuncNonBufferArg_( switch (codes[i]) { case INT64_TO_INT64: case FLOAT64_TO_FLOAT64: { - LOG(FATAL) << "Donot support 64bit argument to device function"; break; + LOG(FATAL) << "Do not support 64bit argument to device function"; break; } case INT64_TO_INT32: { holder[i].v_int32 = static_cast<int32_t>(args.values[base + i].v_int64); diff --git a/src/runtime/rpc/rpc_session.cc b/src/runtime/rpc/rpc_session.cc index 0e2d637ab475b24463f785f18763b07c4afac92d..208944a69dceb465542e18ac4523258c024897e9 100644 --- a/src/runtime/rpc/rpc_session.cc +++ b/src/runtime/rpc/rpc_session.cc @@ -250,9 +250,9 @@ class RPCSession::EventHandler : public dmlc::Stream { this->Write(arr->dtype); this->WriteArray(arr->shape, arr->ndim); CHECK(arr->strides == nullptr) - << "Donot support strided remote array"; + << "Do not support strided remote array"; CHECK_EQ(arr->byte_offset, 0) - << "Donot support send byte offset"; + << "Do not support send byte offset"; break; } case kNull: break; diff --git a/vta/python/vta/ir_pass.py b/vta/python/vta/ir_pass.py index 90df67c53278ea6efdf86919e26e89bde5591592..3efef7135edb2b4a46070b906bcb1b2a93331060 100644 --- a/vta/python/vta/ir_pass.py +++ b/vta/python/vta/ir_pass.py @@ -556,7 +556,7 @@ def inject_dma_intrin(stmt_in): return irb.get() else: - raise RuntimeError("Donot support copy %s->%s" % (src.scope, dst.scope)) + raise 
RuntimeError("Do not support copy %s->%s" % (src.scope, dst.scope)) return tvm.ir_pass.InjectCopyIntrin(stmt_in, "dma_copy", _inject_copy)