torchtitan/experiments/transformers_modeling_backend/__init__.py (20 additions, 0 deletions)
@@ -23,6 +23,26 @@


 flavors = {
+    "debugperf": HFTransformerModelArgs(
+        titan_dense_args=TitanDenseModelArgs(
+            dim=256,
+            n_layers=6,
+            n_heads=16,
+            n_kv_heads=16,
+            vocab_size=2048,
+            rope_theta=500000,
+        ),
+    ),
+    "debugperf_large": HFTransformerModelArgs(
+        titan_dense_args=TitanDenseModelArgs(
+            dim=1024,
+            n_layers=12,
+            n_heads=16,
+            n_kv_heads=16,
+            vocab_size=32000,
+            rope_theta=500000,
+        ),
+    ),
     "debugmodel": HFTransformerModelArgs(
         titan_dense_args=TitanDenseModelArgs(
             dim=256,
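The two new flavors register alongside the existing entries, so any code that resolves a flavor name through this dict picks them up automatically. A trivial sketch of the lookup (the registry usage pattern is inferred from the dict, not shown in this diff):

    # Resolve a flavor name to its model args, exactly as the dict implies.
    args = flavors["debugperf"]
    assert args.titan_dense_args.dim == 256
    assert args.titan_dense_args.n_layers == 6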
@@ -190,8 +190,8 @@ def apply_non_moe_tp(
     layer_plan = {
         "input_layernorm": SequenceParallel(),
         "self_attn": prepare_module_input(
-            input_kwarg_layouts={"hidden_states": Shard(1)},
-            desired_input_kwarg_layouts={"hidden_states": Replicate()},
+            input_layouts=(Shard(1),),
+            desired_input_layouts=(Replicate(),),
         ),
         "post_attention_layernorm": SequenceParallel(),
     }
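The fix swaps keyword-argument layout specs for positional ones, presumably because the HF attention block receives `hidden_states` positionally, so kwarg-keyed layouts would never be applied (an inference from the diff, not stated in it). For reference, a minimal sketch of the underlying PyTorch API, assuming `prepare_module_input` is a thin wrapper over `torch.distributed.tensor.parallel.PrepareModuleInput`:

    # Sketch of the positional form used after this change. PrepareModuleInput
    # redistributes a module's inputs before its forward runs.
    from torch.distributed.tensor import Replicate, Shard
    from torch.distributed.tensor.parallel import PrepareModuleInput

    attn_input_plan = PrepareModuleInput(
        input_layouts=(Shard(1),),             # activations arrive sequence-sharded
        desired_input_layouts=(Replicate(),),  # replicate them before attention runs
    )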
@@ -11,6 +11,8 @@
 class HFTransformers:
     model: str = ""
     """HuggingFace model ID (e.g., 'Qwen/Qwen3-4B-Instruct-2507')"""
+    tie_word_embeddings: bool = False
+    """Whether to tie the input embedding and output projection weights (HF models default to True; this config defaults to False)"""


 @dataclass
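For context, tying shares a single matrix between the token embedding and the output projection, halving those parameters. A minimal sketch of the mechanism (the `embed_tokens`/`lm_head` names follow HF convention and are assumptions here, not taken from this diff):

    import torch.nn as nn

    class TinyLM(nn.Module):
        # Illustrative only: shows what tie_word_embeddings toggles.
        def __init__(self, vocab_size: int, dim: int, tie_word_embeddings: bool):
            super().__init__()
            self.embed_tokens = nn.Embedding(vocab_size, dim)
            self.lm_head = nn.Linear(dim, vocab_size, bias=False)
            if tie_word_embeddings:
                # One shared Parameter: gradients from both uses accumulate on it.
                self.lm_head.weight = self.embed_tokens.weight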
@@ -179,6 +179,7 @@ def update_from_config(self, job_config: JobConfig):
         self.mlp_bias = False
         self.use_cache = False
         self.initializer_range = 1.0  # use as std for normal init in embedding
+        self.tie_word_embeddings = job_config.hf_transformers.tie_word_embeddings

         if not hasattr(self, "inter_dim"):  # Only for llama model
             ffn_hidden_size = 4 * self.dim
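Downstream, Hugging Face's `PreTrainedModel` consults `config.tie_word_embeddings` when tying weights, so the flag set here takes effect at model construction. A hedged end-to-end check (the tiny Llama config is chosen arbitrarily for speed; attribute paths follow the stock `LlamaForCausalLM` layout):

    from transformers import AutoConfig, AutoModelForCausalLM

    cfg = AutoConfig.for_model(
        "llama",
        hidden_size=64, intermediate_size=128, num_hidden_layers=1,
        num_attention_heads=4, vocab_size=128,
        tie_word_embeddings=True,
    )
    model = AutoModelForCausalLM.from_config(cfg)
    # With tying enabled, the LM head and the embedding share storage.
    assert model.lm_head.weight.data_ptr() == model.model.embed_tokens.weight.data_ptr()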