8 changes: 8 additions & 0 deletions pyproject.toml
@@ -48,3 +48,11 @@ simplefold = "simplefold.cli:main"
# Tell hatchling where your packages live when using src layout:
[tool.hatch.build.targets.wheel]
packages = ["src/simplefold"]

# Tell setuptools to include YAML files from configs
[tool.setuptools.packages.find]
where = ["src"]
include = ["simplefold*"]

[tool.setuptools.package-data]
"simplefold.configs" = ["**/*.yaml"]
Empty file.
Empty file.
99 changes: 99 additions & 0 deletions src/simplefold/configs/model/architecture/foldingdit_1.1B.yaml
@@ -0,0 +1,99 @@
_target_: model.torch.architecture.FoldingDiT

hidden_size: 1280
num_heads: 20
atom_num_heads: 6
output_channels: 3
use_atom_mask: False
use_length_condition: True
esm_dropout_prob: 0.0
esm_model: esm2_3B

time_embedder:
  _target_: model.torch.layers.TimestepEmbedder
  hidden_size: 1280
aminoacid_pos_embedder:
  _target_: model.torch.pos_embed.AbsolutePositionEncoding
  in_dim: 1
  embed_dim: 1280
  include_input: True
pos_embedder:
  _target_: model.torch.pos_embed.FourierPositionEncoding
  in_dim: 3
  include_input: True
  min_freq_log2: 0
  max_freq_log2: 12
  num_freqs: 128
  log_sampling: True

trunk:
  _target_: model.torch.blocks.HomogenTrunk
  depth: 36
  block:
    _target_: model.torch.blocks.DiTBlock
    _partial_: True # partial so a new block module is created for each layer in the trunk's for loop
    hidden_size: 1280
    mlp_ratio: 4.0
    use_swiglu: True # SwiGLU FFN
    self_attention_layer:
      _target_: model.torch.layers.EfficientSelfAttentionLayer
      _partial_: True
      hidden_size: 1280
      num_heads: 20
      qk_norm: True
      pos_embedder:
        _target_: model.torch.pos_embed.AxialRotaryPositionEncoding
        in_dim: 4
        embed_dim: 1280
        num_heads: 20
        base: 100.0

atom_hidden_size_enc: 384
atom_n_queries_enc: 32
atom_n_keys_enc: 128
atom_encoder_transformer:
  _target_: model.torch.blocks.HomogenTrunk
  depth: 2
  block:
    _target_: model.torch.blocks.DiTBlock
    _partial_: True # partial so a new block module is created for each layer in the trunk's for loop
    hidden_size: 384
    mlp_ratio: 4.0
    use_swiglu: True # SwiGLU FFN
    self_attention_layer:
      _target_: model.torch.layers.EfficientSelfAttentionLayer
      _partial_: True
      hidden_size: 384
      num_heads: 6
      qk_norm: True
      pos_embedder:
        _target_: model.torch.pos_embed.AxialRotaryPositionEncoding
        in_dim: 4
        embed_dim: 384
        num_heads: 6
        base: 100.0

atom_hidden_size_dec: 384
atom_n_queries_dec: 32
atom_n_keys_dec: 128
atom_decoder_transformer:
  _target_: model.torch.blocks.HomogenTrunk
  depth: 2
  block:
    _target_: model.torch.blocks.DiTBlock
    _partial_: True # partial so a new block module is created for each layer in the trunk's for loop
    hidden_size: 384
    mlp_ratio: 4.0
    use_swiglu: True # SwiGLU FFN
    self_attention_layer:
      _target_: model.torch.layers.EfficientSelfAttentionLayer
      _partial_: True
      hidden_size: 384
      num_heads: 6
      qk_norm: True
      pos_embedder:
        _target_: model.torch.pos_embed.AxialRotaryPositionEncoding
        in_dim: 4
        embed_dim: 384
        num_heads: 6
        base: 100.0
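These architecture files are Hydra configs: _target_ names the class to construct and _partial_ defers construction. A minimal sketch of building the 1.1B variant, assuming Hydra >= 1.1 (needed for _partial_) and that the model.torch.* modules referenced by _target_ are importable (e.g. src/simplefold on sys.path):

from hydra.utils import instantiate
from omegaconf import OmegaConf

# Load the raw YAML and let Hydra build FoldingDiT with all nested sub-modules.
cfg = OmegaConf.load("src/simplefold/configs/model/architecture/foldingdit_1.1B.yaml")
model = instantiate(cfg)

# Rough sanity check on the size of the variant.
print(f"{sum(p.numel() for p in model.parameters()):,} parameters")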
99 changes: 99 additions & 0 deletions src/simplefold/configs/model/architecture/foldingdit_1.6B.yaml
@@ -0,0 +1,99 @@
_target_: model.torch.architecture.FoldingDiT

hidden_size: 1536
num_heads: 24
atom_num_heads: 8
output_channels: 3
use_atom_mask: False
use_length_condition: True
esm_dropout_prob: 0.0
esm_model: esm2_3B

time_embedder:
  _target_: model.torch.layers.TimestepEmbedder
  hidden_size: 1536
aminoacid_pos_embedder:
  _target_: model.torch.pos_embed.AbsolutePositionEncoding
  in_dim: 1
  embed_dim: 1536
  include_input: True
pos_embedder:
  _target_: model.torch.pos_embed.FourierPositionEncoding
  in_dim: 3
  include_input: True
  min_freq_log2: 0
  max_freq_log2: 12
  num_freqs: 128
  log_sampling: True

trunk:
  _target_: model.torch.blocks.HomogenTrunk
  depth: 36
  block:
    _target_: model.torch.blocks.DiTBlock
    _partial_: True # partial so a new block module is created for each layer in the trunk's for loop
    hidden_size: 1536
    mlp_ratio: 4.0
    use_swiglu: True # SwiGLU FFN
    self_attention_layer:
      _target_: model.torch.layers.EfficientSelfAttentionLayer
      _partial_: True
      hidden_size: 1536
      num_heads: 24
      qk_norm: True
      pos_embedder:
        _target_: model.torch.pos_embed.AxialRotaryPositionEncoding
        in_dim: 4
        embed_dim: 1536
        num_heads: 24
        base: 100.0

atom_hidden_size_enc: 512
atom_n_queries_enc: 32
atom_n_keys_enc: 128
atom_encoder_transformer:
  _target_: model.torch.blocks.HomogenTrunk
  depth: 3
  block:
    _target_: model.torch.blocks.DiTBlock
    _partial_: True # partial so a new block module is created for each layer in the trunk's for loop
    hidden_size: 512
    mlp_ratio: 4.0
    use_swiglu: True # SwiGLU FFN
    self_attention_layer:
      _target_: model.torch.layers.EfficientSelfAttentionLayer
      _partial_: True
      hidden_size: 512
      num_heads: 8
      qk_norm: True
      pos_embedder:
        _target_: model.torch.pos_embed.AxialRotaryPositionEncoding
        in_dim: 4
        embed_dim: 512
        num_heads: 8
        base: 100.0

atom_hidden_size_dec: 512
atom_n_queries_dec: 32
atom_n_keys_dec: 128
atom_decoder_transformer:
  _target_: model.torch.blocks.HomogenTrunk
  depth: 3
  block:
    _target_: model.torch.blocks.DiTBlock
    _partial_: True # partial so a new block module is created for each layer in the trunk's for loop
    hidden_size: 512
    mlp_ratio: 4.0
    use_swiglu: True # SwiGLU FFN
    self_attention_layer:
      _target_: model.torch.layers.EfficientSelfAttentionLayer
      _partial_: True
      hidden_size: 512
      num_heads: 8
      qk_norm: True
      pos_embedder:
        _target_: model.torch.pos_embed.AxialRotaryPositionEncoding
        in_dim: 4
        embed_dim: 512
        num_heads: 8
        base: 100.0
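The _partial_: True flags are what let one block definition be reused across layers: Hydra hands the trunk a factory rather than a finished module, and the trunk then builds depth independent copies. An illustrative sketch of that pattern (not the actual HomogenTrunk implementation):

import torch.nn as nn

class TrunkSketch(nn.Module):
    def __init__(self, depth, block):
        super().__init__()
        # `block` arrives as a functools.partial; calling it returns a fresh
        # module, so the `depth` layers do not share weights.
        self.blocks = nn.ModuleList([block() for _ in range(depth)])

    def forward(self, x):
        for blk in self.blocks:
            x = blk(x)
        return x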
101 changes: 101 additions & 0 deletions src/simplefold/configs/model/architecture/foldingdit_100M.yaml
@@ -0,0 +1,101 @@
_target_: model.torch.architecture.FoldingDiT

hidden_size: 768
num_heads: 12
atom_num_heads: 4
output_channels: 3
use_atom_mask: False
use_length_condition: True
esm_dropout_prob: 0.0
esm_model: esm2_3B

time_embedder:
  _target_: model.torch.layers.TimestepEmbedder
  hidden_size: 768

aminoacid_pos_embedder:
  _target_: model.torch.pos_embed.AbsolutePositionEncoding
  in_dim: 1
  embed_dim: 768
  include_input: True

pos_embedder:
  _target_: model.torch.pos_embed.FourierPositionEncoding
  in_dim: 3
  include_input: True
  min_freq_log2: 0
  max_freq_log2: 12
  num_freqs: 128
  log_sampling: True

trunk:
  _target_: model.torch.blocks.HomogenTrunk
  depth: 8
  block:
    _target_: model.torch.blocks.DiTBlock
    _partial_: True # partial so a new block module is created for each layer in the trunk's for loop
    hidden_size: 768
    mlp_ratio: 4.0
    use_swiglu: True # SwiGLU FFN
    self_attention_layer:
      _target_: model.torch.layers.EfficientSelfAttentionLayer
      _partial_: True
      hidden_size: 768
      num_heads: 12
      qk_norm: True
      pos_embedder:
        _target_: model.torch.pos_embed.AxialRotaryPositionEncoding
        in_dim: 4
        embed_dim: 768
        num_heads: 12
        base: 100.0

atom_hidden_size_enc: 256
atom_n_queries_enc: 32
atom_n_keys_enc: 128
atom_encoder_transformer:
  _target_: model.torch.blocks.HomogenTrunk
  depth: 1
  block:
    _target_: model.torch.blocks.DiTBlock
    _partial_: True # partial so a new block module is created for each layer in the trunk's for loop
    hidden_size: 256
    mlp_ratio: 4.0
    use_swiglu: True # SwiGLU FFN
    self_attention_layer:
      _target_: model.torch.layers.EfficientSelfAttentionLayer
      _partial_: True
      hidden_size: 256
      num_heads: 4
      qk_norm: True
      pos_embedder:
        _target_: model.torch.pos_embed.AxialRotaryPositionEncoding
        in_dim: 4
        embed_dim: 256
        num_heads: 4
        base: 100.0

atom_hidden_size_dec: 256
atom_n_queries_dec: 32
atom_n_keys_dec: 128
atom_decoder_transformer:
  _target_: model.torch.blocks.HomogenTrunk
  depth: 1
  block:
    _target_: model.torch.blocks.DiTBlock
    _partial_: True # partial so a new block module is created for each layer in the trunk's for loop
    hidden_size: 256
    mlp_ratio: 4.0
    use_swiglu: True # SwiGLU FFN
    self_attention_layer:
      _target_: model.torch.layers.EfficientSelfAttentionLayer
      _partial_: True
      hidden_size: 256
      num_heads: 4
      qk_norm: True
      pos_embedder:
        _target_: model.torch.pos_embed.AxialRotaryPositionEncoding
        in_dim: 4
        embed_dim: 256
        num_heads: 4
        base: 100.0
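Because the three files live in the same Hydra config group (model/architecture), switching between the 100M, 1.1B, and 1.6B variants is a one-line override once a parent config composes this group. A sketch under the assumption of a hypothetical top-level config.yaml whose defaults list includes model/architecture, with Hydra >= 1.2:

from hydra import compose, initialize_config_dir
from hydra.utils import instantiate

with initialize_config_dir(config_dir="/abs/path/to/src/simplefold/configs", version_base=None):
    cfg = compose(
        config_name="config",  # hypothetical primary config with a defaults list
        overrides=["model/architecture=foldingdit_100M"],
    )

# The group path doubles as the config package, so the node lands at cfg.model.architecture.
model = instantiate(cfg.model.architecture)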