Commit 2882268 (1 parent: c2603a8): 46 changed files with 8,925 additions and 0 deletions.
@@ -0,0 +1,86 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
utils/initialization
"""

import contextlib
import platform
import threading


def emojis(str=''):
    # Return platform-dependent emoji-safe version of string
    return str.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else str


class TryExcept(contextlib.ContextDecorator):
    # YOLOv5 TryExcept class. Usage: @TryExcept() decorator or 'with TryExcept():' context manager
    def __init__(self, msg=''):
        self.msg = msg

    def __enter__(self):
        pass

    def __exit__(self, exc_type, value, traceback):
        if value:
            print(emojis(f"{self.msg}{': ' if self.msg else ''}{value}"))
        return True
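A minimal usage sketch (my addition, not part of the committed file) showing both forms the class comment mentions; the failing calls are stand-ins:

@TryExcept('Decorated call failed')
def risky():
    raise ValueError('boom')

risky()  # exception is swallowed; prints 'Decorated call failed: boom'

with TryExcept('Context block failed'):
    1 / 0  # prints 'Context block failed: division by zero' and execution continues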
def threaded(func):
    # Multi-threads a target function and returns thread. Usage: @threaded decorator
    def wrapper(*args, **kwargs):
        thread = threading.Thread(target=func, args=args, kwargs=kwargs, daemon=True)
        thread.start()
        return thread

    return wrapper


def join_threads(verbose=False):
    # Join all daemon threads, i.e. atexit.register(lambda: join_threads())
    main_thread = threading.current_thread()
    for t in threading.enumerate():
        if t is not main_thread:
            if verbose:
                print(f'Joining thread {t.name}')
            t.join()
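How the two helpers compose, as a small sketch (my example, standard library only):

import time

@threaded
def background_task(n):
    time.sleep(0.1)
    print(f'task {n} done')

threads = [background_task(i) for i in range(3)]  # each call returns an already-started daemon thread
join_threads(verbose=True)  # blocks until every non-main thread has finished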
def notebook_init(verbose=True):
    # Check system software and hardware
    print('Checking setup...')

    import os
    import shutil

    from ultralytics.yolo.utils.checks import check_requirements

    from utils.general import check_font, is_colab
    from utils.torch_utils import select_device  # imports

    check_font()

    import psutil

    if check_requirements('wandb', install=False):
        os.system('pip uninstall -y wandb')  # eliminate unexpected account creation prompt with infinite hang
    if is_colab():
        shutil.rmtree('/content/sample_data', ignore_errors=True)  # remove colab /sample_data directory

    # System info
    display = None
    if verbose:
        gb = 1 << 30  # bytes to GiB (1024 ** 3)
        ram = psutil.virtual_memory().total
        total, used, free = shutil.disk_usage('/')
        with contextlib.suppress(Exception):  # clear display if ipython is installed
            from IPython import display
            display.clear_output()
        s = f'({os.cpu_count()} CPUs, {ram / gb:.1f} GB RAM, {(total - free) / gb:.1f}/{total / gb:.1f} GB disk)'
    else:
        s = ''

    select_device(newline=False)
    print(emojis(f'Setup complete ✅ {s}'))
    return display
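This function only runs inside a cloned YOLOv5 checkout with its requirements installed, since it imports from utils.general and utils.torch_utils. A hedged sketch of the typical first notebook cell, assuming this file is the package's utils/__init__.py as the docstring suggests; the printed numbers depend on the runtime:

from utils import notebook_init  # assumption: this module is importable as the 'utils' package

display = notebook_init()  # prints something like 'Setup complete ✅ (8 CPUs, 51.0 GB RAM, 38.8/166.8 GB disk)'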
@@ -0,0 +1,103 @@
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Activation functions
"""

import torch
import torch.nn as nn
import torch.nn.functional as F


class SiLU(nn.Module):
    # SiLU activation https://arxiv.org/pdf/1606.08415.pdf
    @staticmethod
    def forward(x):
        return x * torch.sigmoid(x)


class Hardswish(nn.Module):
    # Hard-SiLU activation
    @staticmethod
    def forward(x):
        # return x * F.hardsigmoid(x)  # for TorchScript and CoreML
        return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0  # for TorchScript, CoreML and ONNX
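Both are export-friendly rewrites of activations PyTorch already ships; a quick numerical check (my addition) that they track the built-ins:

x = torch.linspace(-4, 4, steps=101)
assert torch.allclose(SiLU()(x), nn.SiLU()(x), atol=1e-6)
assert torch.allclose(Hardswish()(x), nn.Hardswish()(x), atol=1e-6)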
class Mish(nn.Module):
    # Mish activation https://github.com/digantamisra98/Mish
    @staticmethod
    def forward(x):
        return x * F.softplus(x).tanh()


class MemoryEfficientMish(nn.Module):
    # Mish activation memory-efficient
    class F(torch.autograd.Function):

        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x.mul(torch.tanh(F.softplus(x)))  # x * tanh(ln(1 + exp(x)))

        @staticmethod
        def backward(ctx, grad_output):
            x = ctx.saved_tensors[0]
            sx = torch.sigmoid(x)
            fx = F.softplus(x).tanh()
            return grad_output * (fx + x * sx * (1 - fx * fx))

    def forward(self, x):
        return self.F.apply(x)
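MemoryEfficientMish saves only the input for backward and recomputes the rest, trading a little extra compute for lower activation memory. A consistency check (my addition) that its hand-written gradient matches autograd through the plain Mish:

x1 = torch.randn(8, dtype=torch.double, requires_grad=True)
x2 = x1.detach().clone().requires_grad_(True)
Mish()(x1).sum().backward()
MemoryEfficientMish()(x2).sum().backward()
assert torch.allclose(x1.grad, x2.grad)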
class FReLU(nn.Module):
    # FReLU activation https://arxiv.org/abs/2007.11824
    def __init__(self, c1, k=3):  # ch_in, kernel
        super().__init__()
        self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False)
        self.bn = nn.BatchNorm2d(c1)

    def forward(self, x):
        return torch.max(x, self.bn(self.conv(x)))
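Unlike the activations above, FReLU is channel-aware: the threshold in max(x, ·) comes from a depthwise 3x3 convolution plus BatchNorm rather than a constant zero, so the module must be built with the channel count. A shape sketch (my example):

m = FReLU(64)
y = m(torch.randn(2, 64, 32, 32))  # element-wise max(x, T(x)) with a learned spatial condition T
assert y.shape == (2, 64, 32, 32)  # default k=3 with stride 1, padding 1 preserves the spatial size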
class AconC(nn.Module):
    r""" ACON activation (activate or not)
    AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter
    according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
    """

    def __init__(self, c1):
        super().__init__()
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.beta = nn.Parameter(torch.ones(1, c1, 1, 1))

    def forward(self, x):
        dpx = (self.p1 - self.p2) * x
        return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x
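The learnable beta switches the unit between linear and non-linear behaviour: as beta approaches 0 the sigmoid term is 0.5 and the output reduces to 0.5 * (p1 + p2) * x, while large beta pushes it toward max(p1 * x, p2 * x). A small check of the linear limit (my addition):

m = AconC(64)
x = torch.randn(2, 64, 16, 16)
with torch.no_grad():
    m.beta.zero_()  # force the beta -> 0 limit
    assert torch.allclose(m(x), 0.5 * (m.p1 + m.p2) * x, atol=1e-6)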
class MetaAconC(nn.Module):
    r""" ACON activation (activate or not)
    MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network
    according to "Activate or Not: Learning Customized Activation" <https://arxiv.org/pdf/2009.04759.pdf>.
    """

    def __init__(self, c1, k=1, s=1, r=16):  # ch_in, kernel, stride, r
        super().__init__()
        c2 = max(r, c1 // r)
        self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1))
        self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True)
        self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True)
        # self.bn1 = nn.BatchNorm2d(c2)
        # self.bn2 = nn.BatchNorm2d(c1)

    def forward(self, x):
        y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True)
        # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891
        # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y)))))  # bug/unstable
        beta = torch.sigmoid(self.fc2(self.fc1(y)))  # bug patch BN layers removed
        dpx = (self.p1 - self.p2) * x
        return dpx * torch.sigmoid(beta * dpx) + self.p2 * x
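MetaAconC computes beta per sample and per channel from a global-average-pooled summary pushed through the two 1x1 convolutions, which is why the commented-out BatchNorm layers (degenerate statistics on a 1x1 map) were unstable at batch size 1. Because p1, p2 and the fc layers are sized by channel count, these modules are instantiated per layer rather than shared like nn.SiLU(); a hedged usage sketch (my example, channel count arbitrary):

act = MetaAconC(c1=128)
x = torch.randn(4, 128, 20, 20)
y = act(x)  # beta has shape (4, 128, 1, 1); the output keeps the input shape
assert y.shape == x.shape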