RWKV-Runner/finetune/lora/v6/fla/models/__init__.py

# -*- coding: utf-8 -*-
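"""Model zoo for the flash-linear-attention (``fla``) package.

Re-exports the Hugging Face-style ``*Config``, ``*ForCausalLM`` and
``*Model`` classes of every architecture implemented under
``fla.models``, so all of them can be imported from this one namespace.
"""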
from fla.models.abc import ABCConfig, ABCForCausalLM, ABCModel
from fla.models.delta_net import (DeltaNetConfig, DeltaNetForCausalLM,
                                  DeltaNetModel)
from fla.models.gla import GLAConfig, GLAForCausalLM, GLAModel
from fla.models.hgrn import HGRNConfig, HGRNForCausalLM, HGRNModel
from fla.models.hgrn2 import HGRN2Config, HGRN2ForCausalLM, HGRN2Model
from fla.models.linear_attn import (LinearAttentionConfig,
                                    LinearAttentionForCausalLM,
                                    LinearAttentionModel)
from fla.models.mamba import MambaConfig, MambaForCausalLM, MambaModel
from fla.models.retnet import RetNetConfig, RetNetForCausalLM, RetNetModel
from fla.models.rwkv6 import RWKV6Config, RWKV6ForCausalLM, RWKV6Model
from fla.models.transformer import (TransformerConfig, TransformerForCausalLM,
                                    TransformerModel)

__all__ = [
    'ABCConfig', 'ABCForCausalLM', 'ABCModel',
    'DeltaNetConfig', 'DeltaNetForCausalLM', 'DeltaNetModel',
    'GLAConfig', 'GLAForCausalLM', 'GLAModel',
    'HGRNConfig', 'HGRNForCausalLM', 'HGRNModel',
    'HGRN2Config', 'HGRN2ForCausalLM', 'HGRN2Model',
    'LinearAttentionConfig', 'LinearAttentionForCausalLM', 'LinearAttentionModel',
    'MambaConfig', 'MambaForCausalLM', 'MambaModel',
    'RetNetConfig', 'RetNetForCausalLM', 'RetNetModel',
    'RWKV6Config', 'RWKV6ForCausalLM', 'RWKV6Model',
    'TransformerConfig', 'TransformerForCausalLM', 'TransformerModel'
]
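
# Illustrative usage (a minimal sketch, not executed here): every family
# follows the ``transformers`` Config / ForCausalLM / Model convention, so a
# model is typically built from its config. The default hyperparameters
# assumed below come from the respective ``*Config`` class.
#
#     from fla.models import GLAConfig, GLAForCausalLM
#
#     config = GLAConfig()            # default hyperparameters
#     model = GLAForCausalLM(config)  # randomly initialised causal LM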