Format code with autopep8 #9

Open · wants to merge 1 commit into base: master
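The diff below is formatting-only: long `warnings.warn(...)` and `cpp_extension.load(...)` calls are wrapped, continuation lines are re-indented, and trailing whitespace is removed. The PR does not state the exact autopep8 invocation, so the snippet below is only a sketch of a typical way to produce this kind of change; the options, paths, and sample input are assumptions, not taken from the PR.

```python
# Hypothetical invocation; the PR does not record the actual command or options.
# A typical CLI equivalent would be: autopep8 --in-place --recursive easyocr/
import autopep8

# One of the long lines touched by this PR, given here as a string to reformat.
source = (
    'warnings.warn("Using precompiled deform_conv_cpu from {}"'
    '.format(deform_conv_cpu.__file__))\n'
)

# fix_code applies pycodestyle-driven fixes: line wrapping, continuation-line
# indentation, blank-line normalization, trailing-whitespace removal.
formatted = autopep8.fix_code(source, options={"max_line_length": 79,
                                               "aggressive": 1})
print(formatted)
```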
263 changes: 139 additions & 124 deletions easyocr/DBNet/DBNet.py

Large diffs are not rendered by default.

65 changes: 38 additions & 27 deletions easyocr/DBNet/assets/ops/dcn/functions/deform_conv.py
@@ -12,69 +12,76 @@
from torch.nn.modules.utils import _pair
from torch.utils import cpp_extension

# TODO - Jaided AI:
# TODO - Jaided AI:
# 1. Find a better way to handle and support both Ahead-of-Time (AoT) and Just-in-Time (JiT) compilation.
# 2. Find a better way to report errors to help pinpoint issues if there are any.
# Note on JiT and AoT compilation:
# This module supports both AoT and JiT compilation approaches. JiT is hardcoded as the default. If AoT-compiled objects are present, they supersede JiT compilation.



def custom_formatwarning(msg, *args, **kwargs):
# ignore everything except the message
return str(msg) + '\n'


warnings.formatwarning = custom_formatwarning
dcn_dir = os.path.dirname(os.path.dirname(__file__))
try:
from .. import deform_conv_cpu
warnings.warn("Using precompiled deform_conv_cpu from {}".format(deform_conv_cpu.__file__))
warnings.warn("Using precompiled deform_conv_cpu from {}".format(
deform_conv_cpu.__file__))
dcn_cpu_ready = True
except:
try:
warnings.warn("Compiling deform_conv_cpu ...")
warnings.warn("(This may take a while if this module is loaded for the first time.)")
warnings.warn(
"(This may take a while if this module is loaded for the first time.)")
deform_conv_cpu = cpp_extension.load(
name="deform_conv_cpu",
sources=[os.path.join(dcn_dir, 'src', "deform_conv_cpu.cpp"),
os.path.join(dcn_dir, 'src', "deform_conv_cpu_kernel.cpp")])
name="deform_conv_cpu",
sources=[os.path.join(dcn_dir, 'src', "deform_conv_cpu.cpp"),
os.path.join(dcn_dir, 'src', "deform_conv_cpu_kernel.cpp")])
warnings.warn("Done.")
dcn_cpu_ready = True
except Exception as error:
warnings.warn(' '.join([
"Failed to import and/or compile 'deform_conv_cpu' with the following error",
"{}".format(error),
"Deformable convulution and DBNet will not be able to run on CPU."
]))
]))
dcn_cpu_ready = False

if torch.cuda.is_available():
try:
from .. import deform_conv_cuda
warnings.warn("Using precompiled deform_conv_cuda from {}".format(deform_conv_cuda.__file__))
warnings.warn("Using precompiled deform_conv_cuda from {}".format(
deform_conv_cuda.__file__))
dcn_cuda_ready = True
except:
try:
warnings.warn("Compiling deform_conv_cuda ...")
warnings.warn("(This may take a while if this module is loaded for the first time.)")
cuda_sources = [os.path.join(dcn_dir, 'src', src_file)
for src_file in ["deform_conv_cuda.cpp",
"deform_conv_cuda_kernel.cu"]
]
warnings.warn(
"(This may take a while if this module is loaded for the first time.)")
cuda_sources = [os.path.join(dcn_dir, 'src', src_file)
for src_file in ["deform_conv_cuda.cpp",
"deform_conv_cuda_kernel.cu"]
]
deform_conv_cuda = cpp_extension.load(
name="deform_conv_cuda",
sources=[os.path.join(dcn_dir, 'src', "deform_conv_cuda.cpp"),
os.path.join(dcn_dir, 'src', "deform_conv_cuda_kernel.cu")])
name="deform_conv_cuda",
sources=[os.path.join(dcn_dir, 'src', "deform_conv_cuda.cpp"),
os.path.join(dcn_dir, 'src', "deform_conv_cuda_kernel.cu")])
warnings.warn("Done.")
dcn_cuda_ready = True
except Exception as error:
warnings.warn(' '.join([
"Failed to import or compile 'deform_conv_cuda' with the following error",
"{}".format(error),
"Deformable convulution and DBNet will not be able to run on GPU."
]))
]))
dcn_cuda_ready = False


class DeformConvFunction(Function):

@staticmethod
def forward(ctx,
input,
@@ -125,9 +132,10 @@ def forward(ctx,
else:
device_ = input.device.type
raise RuntimeError(
"Input type is {}, but 'deform_conv_{}.*.so' is not imported successfully.".format(device_, device_),
)

"Input type is {}, but 'deform_conv_{}.*.so' is not imported successfully.".format(
device_, device_),
)

return output

@staticmethod
@@ -137,7 +145,8 @@ def backward(ctx, grad_output):
grad_input = grad_offset = grad_weight = None

if not grad_output.is_cuda:
raise NotImplementedError("DCN operator for cpu for backward propagation is not implemented.")
raise NotImplementedError(
"DCN operator for cpu for backward propagation is not implemented.")
else:
cur_im2col_step = min(ctx.im2col_step, input.shape[0])
assert (input.shape[0] %
@@ -206,7 +215,7 @@ def forward(ctx,
ctx.with_bias = bias is not None
if not ctx.with_bias:
bias = input.new_empty(1) # fake tensor

if weight.requires_grad or mask.requires_grad or offset.requires_grad \
or input.requires_grad:
ctx.save_for_backward(input, offset, mask, weight, bias)
@@ -228,14 +237,16 @@ def forward(ctx,
else:
device_ = input.device.type
raise RuntimeError(
"Input type is {}, but 'deform_conv_{}.*.so' is not imported successfully.".format(device_, device_),
)
"Input type is {}, but 'deform_conv_{}.*.so' is not imported successfully.".format(
device_, device_),
)
return output

@staticmethod
def backward(ctx, grad_output):
if not grad_output.is_cuda:
raise NotImplementedError("DCN operator for CPU for backward propagation is not implemented.")
raise NotImplementedError(
"DCN operator for CPU for backward propagation is not implemented.")
input, offset, mask, weight, bias = ctx.saved_tensors
grad_input = torch.zeros_like(input)
grad_offset = torch.zeros_like(offset)
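Aside from the reformatting, the hunks above are dominated by the module's load strategy described in its header comment: import an ahead-of-time compiled extension if one is present, otherwise fall back to just-in-time compilation via `torch.utils.cpp_extension.load`. A minimal standalone sketch of that pattern follows, with the `src/` layout taken from the diff; `load_dcn_extension` and its arguments are illustrative, not EasyOCR API, and the real module additionally emits warnings around each step.

```python
import importlib
import os

from torch.utils import cpp_extension


def load_dcn_extension(package, name, dcn_dir, source_files):
    """Hypothetical helper mirroring the AoT-first / JiT-fallback pattern."""
    try:
        # AoT path: a prebuilt <name>*.so living inside the package wins.
        return importlib.import_module("{}.{}".format(package, name))
    except ImportError:
        # JiT path: compile on first use; torch caches the built extension.
        return cpp_extension.load(
            name=name,
            sources=[os.path.join(dcn_dir, "src", f) for f in source_files])
```

For example, the CPU operator above would be loaded with `load_dcn_extension("easyocr.DBNet.assets.ops.dcn", "deform_conv_cpu", dcn_dir, ["deform_conv_cpu.cpp", "deform_conv_cpu_kernel.cpp"])`.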
49 changes: 29 additions & 20 deletions easyocr/DBNet/assets/ops/dcn/functions/deform_pool.py
@@ -11,63 +11,70 @@
from torch.autograd import Function
from torch.utils import cpp_extension

# TODO - Jaided AI:
# TODO - Jaided AI:
# 1. Find a better way to handle and support both Ahead-of-Time (AoT) and Just-in-Time (JiT) compilation.
# 2. Find a better way to report errors to help pinpoint issues if there are any.
# Note on JiT and AoT compilation:
# This module supports both AoT and JiT compilation approaches. JiT is hardcoded as the default. If AoT-compiled objects are present, they supersede JiT compilation.



def custom_formatwarning(msg, *args, **kwargs):
# ignore everything except the message
return str(msg) + '\n'


warnings.formatwarning = custom_formatwarning
dcn_dir = os.path.dirname(os.path.dirname(__file__))
try:
from .. import deform_pool_cpu
warnings.warn("Using precompiled deform_pool_cpu from {}".format(deform_pool_cpu.__file__))
warnings.warn("Using precompiled deform_pool_cpu from {}".format(
deform_pool_cpu.__file__))
dcn_cpu_ready = True
except:
try:
warnings.warn("Compiling deform_pool_cpu ...")
warnings.warn("(This may take a while if this module is loaded for the first time.)")
warnings.warn(
"(This may take a while if this module is loaded for the first time.)")
deform_pool_cpu = cpp_extension.load(
name="deform_pool_cpu",
sources=[os.path.join(dcn_dir, 'src', "deform_pool_cpu.cpp"),
os.path.join(dcn_dir, 'src', "deform_pool_cpu_kernel.cpp")])
name="deform_pool_cpu",
sources=[os.path.join(dcn_dir, 'src', "deform_pool_cpu.cpp"),
os.path.join(dcn_dir, 'src', "deform_pool_cpu_kernel.cpp")])
warnings.warn("Done.")
dcn_cpu_ready = True
except Exception as error:
warnings.warn(' '.join([
"Failed to import or compile 'deform_pool_cpu' with the following error",
"{}".format(error),
"Deformable convulution and DBNet will not be able to run on CPU."
]))
]))
dcn_cpu_ready = False

if torch.cuda.is_available():
try:
from .. import deform_pool_cuda
warnings.warn("Using precompiled deform_pool_cuda from {}".format(deform_pool_cuda.__file__))
warnings.warn("Using precompiled deform_pool_cuda from {}".format(
deform_pool_cuda.__file__))
dcn_cuda_ready = True
except:
try:
warnings.warn("Compiling deform_pool_cuda ...")
warnings.warn("(This may take a while if this module is loaded for the first time.)")
warnings.warn(
"(This may take a while if this module is loaded for the first time.)")
deform_pool_cuda = cpp_extension.load(
name="deform_pool_cuda",
sources=[os.path.join(dcn_dir, 'src', "deform_pool_cuda.cpp"),
os.path.join(dcn_dir, 'src', "deform_pool_cuda_kernel.cu")])
name="deform_pool_cuda",
sources=[os.path.join(dcn_dir, 'src', "deform_pool_cuda.cpp"),
os.path.join(dcn_dir, 'src', "deform_pool_cuda_kernel.cu")])
warnings.warn("Done.")
dcn_cuda_ready = True
except Exception as error:
warnings.warn(' '.join([
"Failed to import or compile 'deform_pool_cuda' with the following error",
"{}".format(error),
"Deformable convulution and DBNet will not be able to run on GPU."
]))
]))
dcn_cuda_ready = False


class DeformRoIPoolingFunction(Function):

@staticmethod
@@ -93,7 +100,7 @@ def forward(ctx,
ctx.trans_std = trans_std

assert 0.0 <= ctx.trans_std <= 1.0

n = rois.shape[0]
output = data.new_empty(n, out_channels, out_size, out_size)
output_count = data.new_empty(n, out_channels, out_size, out_size)
@@ -102,17 +109,18 @@ def forward(ctx,
data, rois, offset, output, output_count, ctx.no_trans,
ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size,
ctx.part_size, ctx.sample_per_part, ctx.trans_std)
elif data.is_cuda and dcn_cuda_ready:
elif data.is_cuda and dcn_cuda_ready:
deform_pool_cuda.deform_psroi_pooling_cuda_forward(
data, rois, offset, output, output_count, ctx.no_trans,
ctx.spatial_scale, ctx.out_channels, ctx.group_size, ctx.out_size,
ctx.part_size, ctx.sample_per_part, ctx.trans_std)
else:
device_ = input.device.type
raise RuntimeError(
"Input type is {}, but 'deform_conv_{}.*.so' is not imported successfully.".format(device_, device_),
)

"Input type is {}, but 'deform_conv_{}.*.so' is not imported successfully.".format(
device_, device_),
)

if data.requires_grad or rois.requires_grad or offset.requires_grad:
ctx.save_for_backward(data, rois, offset)
ctx.output_count = output_count
@@ -122,7 +130,8 @@ def forward(ctx,
@staticmethod
def backward(ctx, grad_output):
if not grad_output.is_cuda:
raise NotImplementedError("DCN operator for cpu for backward propagation is not implemented.")
raise NotImplementedError(
"DCN operator for cpu for backward propagation is not implemented.")

data, rois, offset = ctx.saved_tensors
output_count = ctx.output_count
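Both files also install the same `custom_formatwarning` hook. Its effect is easy to miss among the wrapping changes: it drops the usual `file:line: UserWarning:` prefix and source-line echo so the compile progress messages print as plain lines. A small self-contained illustration of that behavior:

```python
import warnings


def custom_formatwarning(msg, *args, **kwargs):
    # ignore everything except the message
    return str(msg) + '\n'


warnings.formatwarning = custom_formatwarning
# Emits just "Compiling deform_pool_cpu ..." on stderr, without the default
# "<file>:<line>: UserWarning:" prefix.
warnings.warn("Compiling deform_pool_cpu ...")
```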
18 changes: 9 additions & 9 deletions easyocr/DBNet/assets/ops/dcn/setup.py
@@ -5,14 +5,14 @@
from torch.utils.cpp_extension import CUDAExtension

modules = [
CppExtension('deform_conv_cpu', [
'src/deform_conv_cpu.cpp',
'src/deform_conv_cpu_kernel.cpp',
]),
CppExtension('deform_pool_cpu', [
'src/deform_pool_cpu.cpp',
'src/deform_pool_cpu_kernel.cpp'
])
CppExtension('deform_conv_cpu', [
'src/deform_conv_cpu.cpp',
'src/deform_conv_cpu_kernel.cpp',
]),
CppExtension('deform_pool_cpu', [
'src/deform_pool_cpu.cpp',
'src/deform_pool_cpu_kernel.cpp'
])
]

if torch.cuda.is_available():
@@ -22,7 +22,7 @@
'src/deform_conv_cuda_kernel.cu',
]),
CUDAExtension('deform_pool_cuda', [
'src/deform_pool_cuda.cpp',
'src/deform_pool_cuda.cpp',
'src/deform_pool_cuda_kernel.cu'
])
])
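The setup.py changes are indentation-only, but this file is where the ahead-of-time objects used by the try/except import blocks come from. The `setup()` call itself is outside the diff, so the wiring below is a sketch of the usual PyTorch extension build rather than the file's actual contents; the package name and the truncated module list are assumptions.

```python
# Sketch only: the actual setup() call is not shown in this diff.
from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CppExtension

modules = [
    CppExtension('deform_conv_cpu', [
        'src/deform_conv_cpu.cpp',
        'src/deform_conv_cpu_kernel.cpp',
    ]),
    # deform_pool_cpu and, when CUDA is available, the CUDAExtension modules
    # from the hunks above would be appended here as well.
]

setup(
    name='dcn',  # assumed distribution name
    ext_modules=modules,
    cmdclass={'build_ext': BuildExtension},
)
```

Building in place (typically `python setup.py build_ext --inplace` from the dcn directory) produces the precompiled `deform_conv_*` / `deform_pool_*` objects that the AoT import path prefers over JiT compilation.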