
Commit

[CODE QUALITY]
Kye committed Dec 19, 2023
1 parent 086d008 commit 4748f67
Showing 67 changed files with 81 additions and 121 deletions.
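The hunks below repeat a handful of lint-style fixes: unused imports are dropped, throwaway assignments inside `pytest.raises` blocks are removed, and `== True` / `== False` comparisons are rewritten with `is`. The addition of `.ruff_cache` to `.gitignore` suggests the cleanup was driven by Ruff, though the commit itself does not say which tool or rule set was used. A minimal before/after sketch of the pattern, using a hypothetical `Widget` class rather than any module from this repository:

```python
import pytest


class Widget:
    """Hypothetical stand-in for the modules touched in this commit."""

    def __init__(self, dim: int):
        if dim <= 0:
            raise ValueError("dim must be positive")
        self.dim = dim


# Before: the assignment target is never read (Ruff reports this as F841),
# and the boolean comparison uses equality rather than identity (E712).
def test_widget_negative_dim_before():
    with pytest.raises(ValueError):
        widget = Widget(dim=-256)  # `widget` is never used afterwards
    assert isinstance(Widget(1), Widget) == True


# After: call the constructor only for its side effect, and compare
# against the bool singleton with `is`.
def test_widget_negative_dim_after():
    with pytest.raises(ValueError):
        Widget(dim=-256)
    assert isinstance(Widget(1), Widget) is True
```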
1 change: 1 addition & 0 deletions .gitignore
@@ -11,6 +11,7 @@ data
# Distribution / packaging
.Python
build/
.ruff_cache
.vscode
develop-eggs/
dist/
2 changes: 2 additions & 0 deletions docs/corporate/zeta_cloud.md
@@ -58,3 +58,5 @@ The estimated timeline for shipping Zeta Cloud is as follows:
| Marketplace for Pre-Trained Models | A platform for users to buy, sell, or license pre-trained models. | AI developers, companies looking for ready-to-use models. | Transaction fees, subscription for premium listings. |
| Data Storage and Management | Integrated solutions for data storage, processing, and management. | All users of the platform. | Based on the amount of data stored/processed. |
| API Access for Third-Party Integrations | Providing API access for integration with other tools and services. | Developers, businesses needing integrations. | Monthly/Annual subscription or pay-per-use. |


1 change: 0 additions & 1 deletion playground/models/flamingo.py
@@ -2,7 +2,6 @@
import torch.nn.functional as F
from einops import rearrange
from torch import einsum, nn
from zeta.nn.modules.simple_feedforward import SimpleFeedForward
from zeta.nn.attention.cross_attn_images import MultiModalCrossAttention
import zeta.nn as znn

1 change: 0 additions & 1 deletion playground/models/simple_transformer.py
@@ -3,7 +3,6 @@
from zeta.nn.modules.feedforward import FeedForward
from zeta.nn.attention.shaped_attention import ShapedAttention
from zeta.nn.modules.residual import Residual
from zeta.nn.attention import FlashAttention


class SimpleTransformerBlock(nn.Module):
2 changes: 0 additions & 2 deletions tests/nn/attentions/test_cross_attn.py
@@ -1,6 +1,4 @@
import pytest
import torch
from torch import nn
from zeta.nn.attention.cross_attention import CrossAttention

# Create an instance of CrossAttention for testing
1 change: 0 additions & 1 deletion tests/nn/attentions/test_local_attn_mha.py
@@ -1,6 +1,5 @@
import pytest
import torch
import torch.nn as nn
from torch.autograd import gradcheck
from zeta.nn.attention.local_attention_mha import LocalMHA

1 change: 0 additions & 1 deletion tests/nn/attentions/test_mgqa.py
@@ -1,7 +1,6 @@
import pytest
import torch
from zeta.nn.attention.mgqa import MGQA, CacheView
from zeta.utils.main import exists


# Create an instance of MGQA for testing
3 changes: 0 additions & 3 deletions tests/nn/attentions/test_shaped_attn.py
@@ -1,7 +1,4 @@
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from zeta.nn.attention.shaped_attention import ShapedAttention


4 changes: 0 additions & 4 deletions tests/nn/attentions/test_sparse_attn.py
@@ -65,10 +65,6 @@ def test_sparse_attention_forward():
n_batch = 4
n_ctx = 1024
n_embd = 256
heads = 4
attn_mode = "all"
local_attn_ctx = 32
blocksize = 32

q = torch.randn(n_batch, n_ctx, n_embd)
k = torch.randn(n_batch, n_ctx, n_embd)
6 changes: 3 additions & 3 deletions tests/nn/attentions/test_xc_attention.py
@@ -42,7 +42,7 @@ def test_xc_attention_forward_with_invalid_inputs(xc_attention_model):
with pytest.raises(Exception):
x = torch.randn(1, 256, 16, 16)
cond = torch.randn(1, 128) # Mismatched conditioning dimension
output = xc_attention_model(x, cond)
xc_attention_model(x, cond)


# Test case to check if XCAttention handles different head configurations correctly
@@ -81,10 +81,10 @@ def test_xc_attention_with_different_cond_dims():
# Test case to check if XCAttention handles negative input dimensions correctly
def test_xc_attention_negative_input_dim():
with pytest.raises(ValueError):
model = XCAttention(dim=-256, cond_dim=64, heads=8)
XCAttention(dim=-256, cond_dim=64, heads=8)


# Test case to check if XCAttention handles negative conditioning dimensions correctly
def test_xc_attention_negative_cond_dim():
with pytest.raises(ValueError):
model = XCAttention(dim=256, cond_dim=-64, heads=8)
XCAttention(dim=256, cond_dim=-64, heads=8)
14 changes: 7 additions & 7 deletions tests/nn/biases/test_alibi.py
@@ -152,9 +152,9 @@ def tensors_equal(tensor1, tensor2):

# Test for the existence of a helper function exists
def test_exists_function():
assert exists(None) == False
assert exists(0) == True
assert exists("Hello") == True
assert exists(None) is False
assert exists(0) is True
assert exists("Hello") is True


# Test for the pad_at_dim helper function
@@ -170,8 +170,8 @@ def test_tensors_equal_function():
tensor2 = torch.tensor([1.0, 2.0, 3.0])
tensor3 = torch.tensor([1.0, 2.0, 3.1])

assert tensors_equal(tensor1, tensor2) == True
assert tensors_equal(tensor1, tensor3) == False
assert tensors_equal(tensor1, tensor2) is True
assert tensors_equal(tensor1, tensor3) is False


# Additional tests for tensor manipulation functions
@@ -193,8 +193,8 @@ def test_einops_rearrange_function():

# Test for the nn.Module class inheritance
def test_nn_module_inheritance():
assert issubclass(AlibiPositionalBias, nn.Module) == True
assert issubclass(LearnedAlibiPositionalBias, nn.Module) == True
assert issubclass(AlibiPositionalBias, nn.Module) is True
assert issubclass(LearnedAlibiPositionalBias, nn.Module) is True


# Helper function to create random data
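A note on the `==` → `is` rewrite in this file: the two forms are only interchangeable when the tested value is an actual `bool`. `is True` checks identity with the `True` singleton, so a truthy non-bool return value (for example an `int`) satisfies `== True` but not `is True`; the updated assertions therefore also pin down that helpers like `exists` return real booleans. A small illustrative sketch (the helpers here are hypothetical, not taken from `zeta.utils`):

```python
def truthy_int() -> int:
    # Hypothetical helper that signals success with 1/0 instead of True/False.
    return 1


def real_bool() -> bool:
    return True


assert truthy_int() == True        # passes: in Python, 1 == True
assert not (truthy_int() is True)  # identity check fails for an int
assert real_bool() is True         # only a genuine bool satisfies `is True`
```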
7 changes: 3 additions & 4 deletions tests/nn/biases/test_relative_position_bias.py
@@ -1,6 +1,5 @@
import pytest
import torch
import torch.nn as nn
from zeta.nn.biases.relative_position_bias import RelativePositionBias


@@ -238,13 +237,13 @@ def test_different_bidirectional_bias_values():
# Test case for initializing with negative max distance
def test_negative_max_distance_init():
with pytest.raises(ValueError):
bias = RelativePositionBias(max_distance=-128)
RelativePositionBias(max_distance=-128)


# Test case for initializing with negative num buckets
def test_negative_num_buckets_init():
with pytest.raises(ValueError):
bias = RelativePositionBias(num_buckets=-32)
RelativePositionBias(num_buckets=-32)


# Test case for initializing with a large max distance
@@ -280,4 +279,4 @@ def test_large_num_buckets():
# Test case for bidirectional bias with negative max distance
def test_bidirectional_bias_negative_max_distance():
with pytest.raises(ValueError):
bias = RelativePositionBias(bidirectional=True, max_distance=-128)
RelativePositionBias(bidirectional=True, max_distance=-128)
6 changes: 3 additions & 3 deletions tests/nn/embeddings/test_QFTSPEmbeddings.py
@@ -69,18 +69,18 @@ def test_qftspembeddings_forward_negative_dim():
vocab_size = 10000
dim = -512
with pytest.raises(ValueError):
model = QFTSPEmbeddings(vocab_size, dim)
QFTSPEmbeddings(vocab_size, dim)


def test_qftspembeddings_forward_negative_vocab_size():
vocab_size = -10000
dim = 512
with pytest.raises(ValueError):
model = QFTSPEmbeddings(vocab_size, dim)
QFTSPEmbeddings(vocab_size, dim)


def test_qftspembeddings_forward_zero_vocab_size():
vocab_size = 0
dim = 512
with pytest.raises(ValueError):
model = QFTSPEmbeddings(vocab_size, dim)
QFTSPEmbeddings(vocab_size, dim)
1 change: 0 additions & 1 deletion tests/nn/embeddings/test_patch_embedding.py
@@ -1,4 +1,3 @@
import pytest
import torch
from torch import nn
from einops.layers.torch import Rearrange
2 changes: 0 additions & 2 deletions tests/nn/embeddings/test_rope.py
@@ -1,6 +1,4 @@
import pytest
import torch
from torch import nn

from zeta.nn.embeddings.rope import (
RotaryEmbedding,
5 changes: 2 additions & 3 deletions tests/nn/embeddings/test_sine_positional_embs.py
@@ -1,6 +1,5 @@
import pytest
import torch
from torch import nn
from zeta.nn.embeddings.sine_positional import SinePositionalEmbedding


@@ -76,11 +75,11 @@ def test_extend_pe():
def test_negative_dimension():
dim_model = -512
with pytest.raises(ValueError):
module = SinePositionalEmbedding(dim_model)
SinePositionalEmbedding(dim_model)


# Test case for initializing with alpha=True and dropout > 0
def test_alpha_and_dropout():
dim_model = 512
with pytest.raises(ValueError):
module = SinePositionalEmbedding(dim_model, alpha=True, dropout=0.2)
SinePositionalEmbedding(dim_model, alpha=True, dropout=0.2)
8 changes: 3 additions & 5 deletions tests/nn/embeddings/test_truncated_rotary_emb.py
@@ -1,6 +1,4 @@
import pytest
import torch
from torch import nn
from zeta.nn.embeddings.truncated_rope import TruncatedRotaryEmbedding


@@ -50,7 +48,7 @@ def test_negative_dimension():
b = 1.0
rho = 0.0
with pytest.raises(ValueError):
module = TruncatedRotaryEmbedding(dim, a, b, rho)
TruncatedRotaryEmbedding(dim, a, b, rho)


# Test case for initializing with a > b
@@ -60,7 +58,7 @@ def test_a_greater_than_b():
b = 0.5
rho = 0.0
with pytest.raises(ValueError):
module = TruncatedRotaryEmbedding(dim, a, b, rho)
TruncatedRotaryEmbedding(dim, a, b, rho)


# Test case for initializing with rho > b
@@ -70,4 +68,4 @@ def test_rho_greater_than_b():
b = 1.0
rho = 1.5
with pytest.raises(ValueError):
module = TruncatedRotaryEmbedding(dim, a, b, rho)
TruncatedRotaryEmbedding(dim, a, b, rho)
8 changes: 4 additions & 4 deletions tests/nn/embeddings/test_vision_embeddings.py
@@ -98,25 +98,25 @@ def test_forward_custom():
# Test case for initializing with incorrect image size
def test_incorrect_img_size_init():
with pytest.raises(AssertionError):
module = VisionEmbedding(img_size=256)
VisionEmbedding(img_size=256)


# Test case for initializing with incorrect patch size
def test_incorrect_patch_size_init():
with pytest.raises(AssertionError):
module = VisionEmbedding(patch_size=64)
VisionEmbedding(patch_size=64)


# Test case for initializing with negative in_chans
def test_negative_in_chans_init():
with pytest.raises(ValueError):
module = VisionEmbedding(in_chans=-3)
VisionEmbedding(in_chans=-3)


# Test case for initializing with negative embed_dim
def test_negative_embed_dim_init():
with pytest.raises(ValueError):
module = VisionEmbedding(embed_dim=-768)
VisionEmbedding(embed_dim=-768)


# Test case for initializing with invalid masked_position
4 changes: 2 additions & 2 deletions tests/nn/embeddings/test_vision_lang_embeddings.py
@@ -49,15 +49,15 @@ def test_incorrect_text_embedding_init():
text_embed = nn.Linear(10, 10)
vision_embed = nn.Embedding(10, 10)
with pytest.raises(AssertionError):
module = VisionLanguageEmbedding(text_embed, vision_embed)
VisionLanguageEmbedding(text_embed, vision_embed)


# Test case for initializing with incorrect vision embedding
def test_incorrect_vision_embedding_init():
text_embed = nn.Embedding(10, 10)
vision_embed = nn.Linear(10, 10)
with pytest.raises(AssertionError):
module = VisionLanguageEmbedding(text_embed, vision_embed)
VisionLanguageEmbedding(text_embed, vision_embed)


# Test case for forward pass with text input being None
1 change: 0 additions & 1 deletion tests/nn/modules/test_cross_attn_images.py
@@ -1,6 +1,5 @@
import torch
import torch.nn as nn
import numpy as np
import pytest
from torch.autograd import gradcheck
from zeta.nn.attention.cross_attn_images import MultiModalCrossAttention
1 change: 0 additions & 1 deletion tests/nn/modules/test_custom_mlp.py
@@ -1,7 +1,6 @@
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
from zeta.nn.modules.flexible_mlp import CustomMLP


1 change: 0 additions & 1 deletion tests/nn/modules/test_hebbian.py
@@ -1,6 +1,5 @@
import pytest
import torch
import torch.nn as nn

from zeta.nn.modules.hebbian import (
BasicHebbianGRUModel,
10 changes: 5 additions & 5 deletions tests/nn/modules/test_image_projector.py
@@ -90,7 +90,7 @@ def test_patch_projector_performance(sample_input_tensor):
# Measure the time taken for 100 forward passes
start_time = time.time()
for _ in range(100):
output_tensor = patch_projector(input_tensor)
patch_projector(input_tensor)
end_time = time.time()

elapsed_time = end_time - start_time
@@ -211,7 +211,7 @@ def test_patch_projector_performance_various_input_sizes(
# Measure the time taken for 100 forward passes
start_time = time.time()
for _ in range(100):
output_tensor = patch_projector(input_tensor)
patch_projector(input_tensor)
end_time = time.time()

elapsed_time = end_time - start_time
@@ -249,15 +249,15 @@ def test_patch_projector_output_shape_consistency(sample_input_tensor):
# Test case for edge case: invalid max_patch_size
def test_patch_projector_invalid_max_patch_size():
with pytest.raises(ValueError):
patch_projector = ImagePatchCreatorProjector(
ImagePatchCreatorProjector(
max_patch_size=0, embedding_dim=768
)


# Test case for edge case: invalid embedding_dim
def test_patch_projector_invalid_embedding_dim():
with pytest.raises(ValueError):
patch_projector = ImagePatchCreatorProjector(
ImagePatchCreatorProjector(
max_patch_size=16, embedding_dim=0
)

@@ -270,7 +270,7 @@ def test_patch_projector_invalid_input_shape():
input_tensor = torch.randn(1, 3, 32, 32) # Smaller image

with pytest.raises(ValueError):
output_tensor = patch_projector(input_tensor)
patch_projector(input_tensor)


# Test case for dynamic patch size calculation
2 changes: 1 addition & 1 deletion tests/nn/modules/test_log_ff.py
@@ -1,6 +1,6 @@
import torch
import pytest
from zeta.nn.modules.log_ff import LogFF, compute_entropy_safe
from zeta.nn.modules.log_ff import LogFF


# Test fixture for a sample input tensor
2 changes: 1 addition & 1 deletion tests/nn/modules/test_test_conv_lang.py
@@ -78,7 +78,7 @@ def test_with_mocked_convolution_layer():
block = ConvolutionLanguageBlock(128, 256, 3, 1)
block.conv_layers[0] = mock_convolution
x = torch.randn(1, 128, 1024)
output = block(x)
block(x)
assert mock_convolution.called

