RuntimeError: class '__torch__.kornia.geometry.boxes.Boxes3D' already defined
I am using premade code that works on colab.research.google.com, but when I downloaded it locally and ran it in Jupyter I got this error:
RuntimeError: class '__torch__.kornia.geometry.boxes.Boxes3D' already defined.
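Since the exact same notebook runs on Colab, I assume the difference is in the environment. Here is a minimal sketch (assuming Python 3.8+ for importlib.metadata) that I can run in both places to compare versions without importing kornia, since the import itself is what crashes:

import sys
from importlib.metadata import version

# Read installed versions from package metadata so kornia itself is
# never imported (importing it is what triggers the error locally).
print("python:", sys.version.split()[0])
print("torch :", version("torch"))
print("kornia:", version("kornia"))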
Here is the premade code:
# @title 3) Download Libraries for Neural Network
import argparse
import math
from pathlib import Path
import sys
sys.path.append('./taming-transformers')
from IPython import display
from base64 import b64encode
from omegaconf import OmegaConf
from PIL import Image
from taming.models import cond_transformer, vqgan
import torch
from torch import nn, optim
from torch.nn import functional as F
from torchvision import transforms
from torchvision.transforms import functional as TF
from tqdm.notebook import tqdm
from CLIP import clip
import kornia.augmentation as K
import numpy as np
import imageio
from PIL import ImageFile, Image
from imgtag import ImgTag  # metadata
from libxmp import *  # metadata
import libxmp  # metadata
from stegano import lsb
import json
import wget  # used by download_img below
ImageFile.LOAD_TRUNCATED_IMAGES = True
def sinc(x):
    return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))

def lanczos(x, a):
    cond = torch.logical_and(-a < x, x < a)
    out = torch.where(cond, sinc(x) * sinc(x/a), x.new_zeros([]))
    return out / out.sum()

def ramp(ratio, width):
    n = math.ceil(width / ratio + 1)
    out = torch.empty([n])
    cur = 0
    for i in range(out.shape[0]):
        out[i] = cur
        cur += ratio
    return torch.cat([-out[1:].flip([0]), out])[1:-1]
def resample(input, size, align_corners=True):
    n, c, h, w = input.shape
    dh, dw = size

    input = input.view([n * c, 1, h, w])

    if dh < h:
        kernel_h = lanczos(ramp(dh / h, 2), 2).to(input.device, input.dtype)
        pad_h = (kernel_h.shape[0] - 1) // 2
        input = F.pad(input, (0, 0, pad_h, pad_h), 'reflect')
        input = F.conv2d(input, kernel_h[None, None, :, None])

    if dw < w:
        kernel_w = lanczos(ramp(dw / w, 2), 2).to(input.device, input.dtype)
        pad_w = (kernel_w.shape[0] - 1) // 2
        input = F.pad(input, (pad_w, pad_w, 0, 0), 'reflect')
        input = F.conv2d(input, kernel_w[None, None, None, :])

    input = input.view([n, c, h, w])
    return F.interpolate(input, size, mode='bicubic', align_corners=align_corners)
class ReplaceGrad(torch.autograd.Function):
    @staticmethod
    def forward(ctx, x_forward, x_backward):
        ctx.shape = x_backward.shape
        return x_forward

    @staticmethod
    def backward(ctx, grad_in):
        return None, grad_in.sum_to_size(ctx.shape)

replace_grad = ReplaceGrad.apply

class ClampWithGrad(torch.autograd.Function):
    @staticmethod
    def forward(ctx, input, min, max):
        ctx.min = min
        ctx.max = max
        ctx.save_for_backward(input)
        return input.clamp(min, max)

    @staticmethod
    def backward(ctx, grad_in):
        input, = ctx.saved_tensors
        return grad_in * (grad_in * (input - input.clamp(ctx.min, ctx.max)) >= 0), None, None

clamp_with_grad = ClampWithGrad.apply
def vector_quantize(x, codebook):
    d = x.pow(2).sum(dim=-1, keepdim=True) + codebook.pow(2).sum(dim=1) - 2 * x @ codebook.T
    indices = d.argmin(-1)
    x_q = F.one_hot(indices, codebook.shape[0]).to(d.dtype) @ codebook
    return replace_grad(x_q, x)

class Prompt(nn.Module):
    def __init__(self, embed, weight=1., stop=float('-inf')):
        super().__init__()
        self.register_buffer('embed', embed)
        self.register_buffer('weight', torch.as_tensor(weight))
        self.register_buffer('stop', torch.as_tensor(stop))

    def forward(self, input):
        input_normed = F.normalize(input.unsqueeze(1), dim=2)
        embed_normed = F.normalize(self.embed.unsqueeze(0), dim=2)
        dists = input_normed.sub(embed_normed).norm(dim=2).div(2).arcsin().pow(2).mul(2)
        dists = dists * self.weight.sign()
        return self.weight.abs() * replace_grad(dists, torch.maximum(dists, self.stop)).mean()

def parse_prompt(prompt):
    vals = prompt.rsplit(':', 2)
    vals = vals + ['', '1', '-inf'][len(vals):]
    return vals[0], float(vals[1]), float(vals[2])
class MakeCutouts(nn.Module):
    def __init__(self, cut_size, cutn, cut_pow=1.):
        super().__init__()
        self.cut_size = cut_size
        self.cutn = cutn
        self.cut_pow = cut_pow
        self.augs = nn.Sequential(
            K.RandomHorizontalFlip(p=0.5),
            # K.RandomSolarize(0.01, 0.01, p=0.7),
            K.RandomSharpness(0.3, p=0.4),
            K.RandomAffine(degrees=30, translate=0.1, p=0.8, padding_mode='border'),
            K.RandomPerspective(0.2, p=0.4),
            K.ColorJitter(hue=0.01, saturation=0.01, p=0.7))
        self.noise_fac = 0.1

    def forward(self, input):
        sideY, sideX = input.shape[2:4]
        max_size = min(sideX, sideY)
        min_size = min(sideX, sideY, self.cut_size)
        cutouts = []
        for _ in range(self.cutn):
            size = int(torch.rand([])**self.cut_pow * (max_size - min_size) + min_size)
            offsetx = torch.randint(0, sideX - size + 1, ())
            offsety = torch.randint(0, sideY - size + 1, ())
            cutout = input[:, :, offsety:offsety + size, offsetx:offsetx + size]
            cutouts.append(resample(cutout, (self.cut_size, self.cut_size)))
        batch = self.augs(torch.cat(cutouts, dim=0))
        if self.noise_fac:
            facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)
            batch = batch + facs * torch.randn_like(batch)
        return batch
def load_vqgan_model(config_path, checkpoint_path):
    config = OmegaConf.load(config_path)
    if config.model.target == 'taming.models.vqgan.VQModel':
        model = vqgan.VQModel(**config.model.params)
        model.eval().requires_grad_(False)
        model.init_from_ckpt(checkpoint_path)
    elif config.model.target == 'taming.models.cond_transformer.Net2NetTransformer':
        parent_model = cond_transformer.Net2NetTransformer(**config.model.params)
        parent_model.eval().requires_grad_(False)
        parent_model.init_from_ckpt(checkpoint_path)
        model = parent_model.first_stage_model
    elif config.model.target == 'taming.models.vqgan.GumbelVQ':
        model = vqgan.GumbelVQ(**config.model.params)
        print(config.model.params)
        model.eval().requires_grad_(False)
        model.init_from_ckpt(checkpoint_path)
    else:
        raise ValueError(f'unknown model type: {config.model.target}')
    del model.loss
    return model

def resize_image(image, out_size):
    ratio = image.size[0] / image.size[1]
    area = min(image.size[0] * image.size[1], out_size[0] * out_size[1])
    size = round((area * ratio)**0.5), round((area / ratio)**0.5)
    return image.resize(size, Image.LANCZOS)
def download_img(img_url):
    try:
        return wget.download(img_url, out="input.jpg")
    except Exception:
        # download failed; return None
        return
Again, there isn't any problem with the code itself; it works, just not in this local conda environment.
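To rule out Jupyter's cell re-execution as the trigger, the failing import can be isolated in a tiny standalone script run from the same conda environment (a minimal sketch; repro.py is my own file name):

# repro.py -- run with `python repro.py` in the activated "taming" env.
# If this also raises "Boxes3D already defined", the notebook is not
# the cause; the environment itself is.
import kornia.augmentation as K

print("kornia.augmentation imported OK")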
Here is the full error output:
RuntimeError Traceback (most recent call last)
Input In [22], in <cell line: 22>()
19 from tqdm.notebook import tqdm
21 from CLIP import clip
---> 22 import kornia.augmentation as K
23 import numpy as np
24 import imageio
File ~\anaconda3\envs\taming\lib\site-packages\kornia\__init__.py:10, in <module>
7 from . import geometry
9 # import the other modules for convenience
---> 10 from . import (
11 augmentation,
12 color,
13 contrib,
14 enhance,
15 feature,
16 losses,
17 metrics,
18 morphology,
19 tracking,
20 utils,
21 x,
22 )
23 # NOTE: we are going to expose to top level very few things
24 from kornia.constants import pi
File ~\anaconda3\envs\taming\lib\site-packages\kornia\augmentation\__init__.py:54, in <module>
41 from kornia.augmentation._3d import (
42 CenterCrop3D,
43 RandomAffine3D,
(...)
51 RandomVerticalFlip3D,
52 )
53 from kornia.augmentation._3d.base import AugmentationBase3D
---> 54 from kornia.augmentation.container import AugmentationSequential, ImageSequential, PatchSequential, VideoSequential
56 __all__ = [
57 "AugmentationBase2D",
58 "GeometricAugmentationBase2D",
(...)
109 "VideoSequential",
110 ]
File ~\anaconda3\envs\taming\lib\site-packages\kornia\augmentation\container\__init__.py:1, in <module>
----> 1 from kornia.augmentation.container.augment import AugmentationSequential
2 from kornia.augmentation.container.image import ImageSequential
3 from kornia.augmentation.container.patch import PatchSequential
File ~\anaconda3\envs\taming\lib\site-packages\kornia\augmentation\container\augment.py:20, in <module>
18 from kornia.augmentation.container.video import VideoSequential
19 from kornia.constants import DataKey
---> 20 from kornia.geometry.boxes import Boxes
22 __all__ = ["AugmentationSequential"]
25 class AugmentationSequential(ImageSequential):
File ~\anaconda3\envs\taming\lib\site-packages\kornia\geometry\boxes.py:465, in <module>
460 self._data = self._data.to(device=device, dtype=dtype)
461 return self
464 @torch.jit.script
--> 465 class Boxes3D:
466 r"""3D boxes containing N or BxN boxes.
467
468 Args:
(...)
478 `hexahedrons <https://en.wikipedia.org/wiki/Hexahedron>`_ are cubes and rhombohedrons.
479 """
480 def __init__(
481 self, boxes: torch.Tensor, raise_if_not_floating_point: bool = True,
482 mode: str = "xyzxyz_plus"
483 ) -> None:
File ~\anaconda3\envs\taming\lib\site-packages\torch\jit\_script.py:924, in script(obj, optimize, _frames_up, _rcb)
921 def fail(self, *args, **kwargs):
922 raise RuntimeError(name + " is not supported on ScriptModules")
--> 924 return fail
File ~\anaconda3\envs\taming\lib\site-packages\torch\jit\_script.py:64, in _compile_and_register_class(obj, rcb, qualified_name)
61 def _reduce(cls):
62 raise pickle.PickleError("ScriptFunction cannot be pickled")
---> 64 ScriptFunction.__reduce__ = _reduce # type: ignore[assignment]
67 if _enabled:
68 Attribute = collections.namedtuple("Attribute", ["value", "type"])
RuntimeError: class '__torch__.kornia.geometry.boxes.Boxes3D' already defined.
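If I read the traceback right, the error is raised while torch.jit.script compiles kornia's Boxes3D class (kornia/geometry/boxes.py, line 465). As far as I understand (a simplified, hedged illustration with a hypothetical class, not the kornia code itself), TorchScript registers scripted classes by qualified name, so compiling two classes with the same name in one interpreter produces this exact message:

import torch

class Foo:
    def __init__(self) -> None:
        self.x = 1

torch.jit.script(Foo)  # registers '__torch__.__main__.Foo'

class Foo:  # redefinition: a new class object with the same qualified name
    def __init__(self) -> None:
        self.x = 1

# On the torch builds I have seen, this raises
# RuntimeError: class '__torch__.__main__.Foo' already defined
torch.jit.script(Foo)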
https://colab.research.google.com/drive/1lx9AGsrh7MlyJhK9UrNTK8pYpARnx457?usp=sharing
This is the link to the project I downloaded to run locally instead of using it online; I am trying to get it up and running.
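In case it helps, here is the diagnostic I am trying next (a hedged sketch, my own guess rather than a known fix): checking that only one copy of kornia resolves in this environment, since "already defined" suggests boxes.py is somehow being registered twice.

import importlib.util

# Locate the kornia package without importing (and thus compiling) it.
spec = importlib.util.find_spec("kornia")
print("kornia resolves to:", spec.origin if spec else "kornia not found")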
Thanks!!!
Sources
This article follows the attribution requirements of Stack Overflow and is licensed under CC BY-SA 3.0.
Source: Stack Overflow