```python
# model.py
import torch
import torch.nn as nn
import torch.nn.functional as F

class GraphEncoder(nn.Module):
    """Multimodal Encoder + Graph message passing (IntentGraphNet, Sec. 3.3)."""
    def __init__(self, in_dim, hidden_dim):
        super().__init__()
        self.embed = nn.Linear(in_dim, hidden_dim)
        self.attn = nn.Linear(2 * hidden_dim, 1)
        self.update = nn.Linear(hidden_dim, hidden_dim)

    def forward(self, x, adj):
        """
        x:   [N, in_dim]  node features (the original listing truncates here;
             the message-passing step below is a sketch)
        adj: [N, N]       adjacency matrix (1 where an edge exists)
        """
        h = self.embed(x)                                  # [N, hidden_dim]
        n = h.size(0)
        # pairwise attention logits from concatenated (sender, receiver) embeddings
        pairs = torch.cat([h.unsqueeze(1).expand(n, n, -1),
                           h.unsqueeze(0).expand(n, n, -1)], dim=-1)
        logits = self.attn(pairs).squeeze(-1).masked_fill(adj == 0, float("-inf"))
        alpha = torch.softmax(logits, dim=-1).nan_to_num()  # isolated nodes -> zero rows
        msg = alpha @ h                                     # aggregate neighbor messages
        return F.relu(self.update(msg) + h)                 # residual node update
```
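A quick smoke test of the encoder above, assuming the `GraphEncoder` class just defined is in scope; the node count, feature dimensions, and ring-shaped graph are illustrative assumptions, not values from the source.

```python
# Usage sketch for GraphEncoder (dimensions and graph are illustrative assumptions)
import torch

enc = GraphEncoder(in_dim=32, hidden_dim=64)
x = torch.randn(5, 32)                  # 5 nodes with 32-d features
adj = torch.eye(5).roll(1, dims=0)      # directed ring: edge (i+1) <- i
adj = adj + adj.t()                     # make edges bidirectional
out = enc(x, adj)
print(out.shape)                        # torch.Size([5, 64])
```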
```python
# model.py
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Dict, List, Tuple, Optional, Any
import math
import random

# -----------------------------
# Data structures
# -----------------------------
@dataclass
class WorldObject:
    obj_id: str
    cls: str
    affordances: List[str]
    linguistic_tags: List[str]
    semantic_anchors: List[str]
    state: Dict[str, Any] = field(default_factory=dict)

# The original listing truncates at the next dataclass; the container below is
# a hypothetical placeholder so the module stays importable.
@dataclass
class WorldState:
    objects: Dict[str, WorldObject] = field(default_factory=dict)
```
```python
# model.py
# AeroPerceptNet + GeoNarrative Alignment (simplified PyTorch version)
# Author: your-name
# License: MIT
import torch
import torch.nn as nn
import torch.nn.functional as F

# ----------------------------
# Multimodal Fusion Encoding
# ----------------------------
class FusionEncoder(nn.Module):
    def __init__(self, input_dim=64, latent_dim=64):
        super().__init__()
        self.vis_proj = nn.Linear(input_dim, latent_dim)
        self.sem_proj = nn.Linear(input_dim, latent_dim)
        # NOTE: the original listing truncates here; the fusion head and
        # forward pass below are a sketch (concatenate-then-project).
        self.fuse = nn.Linear(2 * latent_dim, latent_dim)

    def forward(self, vis, sem):
        v = F.relu(self.vis_proj(vis))   # visual features  -> latent
        s = F.relu(self.sem_proj(sem))   # semantic features -> latent
        return self.fuse(torch.cat([v, s], dim=-1))
```
```python
# model.py
# MoLENet + KAST minimal PyTorch implementation
# Author: your-name
# License: MIT
import torch
import torch.nn as nn
import torch.nn.functional as F

# ----------------------------
# Tokenized Dual-Branch Encoder
# ----------------------------
class DualBranchEncoder(nn.Module):
    def __init__(self, input_dim, latent_dim):
        super().__init__()
        # static branch
        self.static = nn.Sequential(
            nn.Linear(input_dim, latent_dim),
            nn.ReLU(),
            nn.Linear(latent_dim, latent_dim),
        )
        # dynamic branch (the original listing truncates above; this GRU branch is a sketch)
        self.dynamic = nn.GRU(input_dim, latent_dim, batch_first=True)

    def forward(self, x_static, x_dynamic):
        # x_static: (B, input_dim), x_dynamic: (B, T, input_dim)
        s = self.static(x_static)
        _, h = self.dynamic(x_dynamic)
        return s, h.squeeze(0)
```
```python
# model.py
# Minimal PBEN + CAAS implementation in PyTorch
# Author: your-name
# License: MIT
import torch
import torch.nn as nn
import torch.nn.functional as F

# ----------------------------
# PBEN: Polycentric Behavioral Embedding Network
# ----------------------------
class PBEN(nn.Module):
    def __init__(self, input_dim=64, latent_dim=32, hidden_dim=64):
        super().__init__()
        self.latent_dim = latent_dim
        # Linear projections (the original listing truncates here; the
        # encoder head below is a sketch)
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, latent_dim),
        )

    def forward(self, x):
        # x: (B, input_dim) behavioral features -> (B, latent_dim) embedding
        return self.encoder(x)
```
```python
# model.py
# Minimal PRIT + RRA + inter-phase contrastive loss + REDIP policy hooks
# Author: your-name
# License: MIT
import torch
import torch.nn as nn
import torch.nn.functional as F

# ----------------------------
# Phase-aware Reflective Interaction Transformer (PRIT)
# ----------------------------
class PhaseAwareAttention(nn.Module):
    def __init__(self, embed_dim, num_heads):
        super().__init__()
        self.attn = nn.MultiheadAttention(embed_dim, num_heads, batch_first=True)
        self.norm = nn.LayerNorm(embed_dim)

    def forward(self, x, phase_mask=None):
        # x: (B, T, embed_dim); phase_mask optionally restricts attention to tokens
        # of the same phase (the original listing truncates here; this is a sketch)
        out, _ = self.attn(x, x, x, attn_mask=phase_mask)
        return self.norm(x + out)
```
```python
# model.py
# Minimal GlypticNet + 3CL-style losses in PyTorch
# Author: your-name
# License: MIT
from typing import Optional, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F

# ----------------------------
# Utility: simple GCN layer
# ----------------------------
class GCNLayer(nn.Module):
    """
    Basic GCN layer with A_hat = A + I and symmetric normalization.
    X_{l+1} = ReLU( D^{-1/2} A_hat D^{-1/2} X_l W )
    """
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.linear = nn.Linear(in_dim, out_dim, bias=False)

    def forward(self, x, adj):
        # x: (N, in_dim), adj: (N, N); add self-loops, normalize symmetrically
        a_hat = adj + torch.eye(adj.size(0), device=adj.device)
        d_inv_sqrt = a_hat.sum(dim=-1).clamp(min=1e-6).pow(-0.5)
        a_norm = d_inv_sqrt.unsqueeze(1) * a_hat * d_inv_sqrt.unsqueeze(0)
        return F.relu(a_norm @ self.linear(x))
```
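A minimal usage sketch of the `GCNLayer` above, assuming the class is in scope; the graph (a 4-node path) and dimensions are illustrative assumptions.

```python
# Usage sketch for GCNLayer (graph and dims are illustrative assumptions)
import torch

layer = GCNLayer(in_dim=16, out_dim=8)
x = torch.randn(4, 16)                     # 4 nodes, 16-d features
adj = torch.tensor([[0., 1., 0., 0.],
                    [1., 0., 1., 0.],
                    [0., 1., 0., 1.],
                    [0., 0., 1., 0.]])     # path graph 0-1-2-3
h = layer(x, adj)                          # (4, 8) node embeddings
```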
```python
# model.py
import torch
import torch.nn as nn
import torch.nn.functional as F

# -------------------------------
# Modality-Specific Encoders
# -------------------------------
class ModalityEncoder(nn.Module):
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(in_dim, out_dim),
            nn.ReLU(),
            nn.LayerNorm(out_dim)
        )

    def forward(self, x):
        return self.net(x)
```
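A usage sketch with one `ModalityEncoder` per modality; the averaging fusion rule and all dimensions here are assumptions for illustration, not taken from the original listing.

```python
# Usage sketch: one ModalityEncoder per modality, fused by simple averaging
# (fusion rule and dims are illustrative assumptions)
import torch

img_enc = ModalityEncoder(in_dim=512, out_dim=128)
txt_enc = ModalityEncoder(in_dim=300, out_dim=128)
img_feat = torch.randn(8, 512)             # batch of image features
txt_feat = torch.randn(8, 300)             # batch of text features
fused = 0.5 * (img_enc(img_feat) + txt_enc(txt_feat))   # (8, 128)
```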
```python
# model.py
import torch
import torch.nn as nn
import torch.nn.functional as F

# ----------------------------
# Patch Embedding
# ----------------------------
class PatchEmbed(nn.Module):
    def __init__(self, in_ch=3, embed_dim=128, patch_size=16):
        super().__init__()
        self.proj = nn.Conv2d(in_ch, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        x = self.proj(x)                   # (B, D, H/ps, W/ps)
        x = x.flatten(2).transpose(1, 2)   # (B, N, D) patch tokens
        return x
```
```python
# model.py
import torch
import torch.nn as nn
import torch.nn.functional as F

# -------------------------------
# Basic Convolutional Encoder
# -------------------------------
class ConvBlock(nn.Module):
    def __init__(self, in_ch, out_ch, k=3, s=1, p=1):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, k, s, p),
            nn.BatchNorm2d(out_ch),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        return self.net(x)
```
```python
# model.py
import torch
import torch.nn as nn
import torch.nn.functional as F

class PatchEmbed(nn.Module):
    """Flatten MRI patch into token embeddings."""
    def __init__(self, in_channels=1, embed_dim=128, patch_size=16):
        super().__init__()
        self.proj = nn.Conv2d(in_channels, embed_dim, kernel_size=patch_size, stride=patch_size)

    def forward(self, x):
        x = self.proj(x)                   # (B, embed_dim, H/ps, W/ps)
        x = x.flatten(2).transpose(1, 2)   # (B, num_patches, embed_dim)
        return x
```
```python
# model.py
# RecurAlignNet + T-GEP (reference implementation)
# Author: Legend Co., Ltd.
# License: MIT (adjust as needed)
from typing import Dict, List, Optional, Tuple
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

# ----------------------------
# Utilities
# ----------------------------
class TimeEncoding(nn.Module):
    """Sinusoidal or learned time encoding; here we use learned embeddings.
    (The original listing truncates here; the layers below are a sketch.)
    """
    def __init__(self, max_len, dim):
        super().__init__()
        self.embed = nn.Embedding(max_len, dim)

    def forward(self, t):
        # t: (B, T) integer time-step indices -> (B, T, dim)
        return self.embed(t)
```
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchdiffeq import odeint

class TemporalEncoder(nn.Module):
    """BiGRU-based encoder for multimodal temporal data."""
    def __init__(self, input_dim, hidden_dim, latent_dim):
        super().__init__()
        self.bigru = nn.GRU(input_dim, hidden_dim, batch_first=True, bidirectional=True)
        self.fc = nn.Linear(2 * hidden_dim, latent_dim)

    def forward(self, x):
        # x: (B, T, input_dim) -> per-step latent codes (B, T, latent_dim)
        h, _ = self.bigru(x)
        return self.fc(h)
```
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class LatentMultiAgentEncoding(nn.Module):
    """Latent Multi-Agent Encoding (LME) module."""
    def __init__(self, input_dim, latent_dim):
        super().__init__()
        self.encoder = nn.Linear(input_dim, latent_dim)
        self.decoder = nn.Linear(latent_dim, input_dim)

    def forward(self, x):
        z = F.relu(self.encoder(x))
        recon = self.decoder(z)
        return z, recon
```
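A usage sketch pairing the `LatentMultiAgentEncoding` module above with a reconstruction objective; the MSE loss and the scene/agent dimensions are illustrative assumptions.

```python
# Usage sketch: auto-encoding agent states with a reconstruction objective
# (loss choice and dims are illustrative assumptions)
import torch
import torch.nn.functional as F

lme = LatentMultiAgentEncoding(input_dim=24, latent_dim=8)
states = torch.randn(16, 5, 24)            # 16 scenes, 5 agents, 24-d state each
z, recon = lme(states)                     # z: (16, 5, 8), recon: (16, 5, 24)
loss = F.mse_loss(recon, states)           # reconstruction term
loss.backward()
```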
```python
import torch
import torch.nn as nn
import torch.nn.functional as F
class TemporalConvEncoder(nn.Module):
    """Temporal convolution + positional encoding for patient trajectories."""
    def __init__(self, input_dim, hidden_dim, num_layers=3, kernel_size=3):
        super().__init__()
        self.input_fc = nn.Linear(input_dim, hidden_dim)
        self.convs = nn.ModuleList([
            nn.Conv1d(hidden_dim, hidden_dim, kernel_size, padding="same", dilation=2 ** i)
            for i in range(num_layers)
        ])
    def forward(self, x):
        # x: (B, T, input_dim) -> (B, T, hidden_dim); forward pass below is a sketch
        h = self.input_fc(x).transpose(1, 2)   # (B, hidden_dim, T) for Conv1d
        for conv in self.convs:
            h = F.relu(conv(h))
        return h.transpose(1, 2)
```
```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TemporalEncoder(nn.Module):
    """GRU-based temporal encoder for patient clinical sequences."""
    def __init__(self, input_dim, hidden_dim):
        super(TemporalEncoder, self).__init__()
        self.fc_in = nn.Linear(input_dim, hidden_dim)
        self.gru = nn.GRU(hidden_dim, hidden_dim, batch_first=True)

    def forward(self, x):
        # x: [batch, seq_len, input_dim]; return values below are a sketch,
        # the original listing truncates here
        x = F.relu(self.fc_in(x))          # project inputs to hidden_dim
        out, h = self.gru(x)               # out: [batch, seq_len, hidden_dim]
        return out, h.squeeze(0)           # per-step states and final state
```
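A usage sketch for the GRU encoder above, assuming the class as completed; the batch size, visit count, and feature dimension are illustrative assumptions.

```python
# Usage sketch for TemporalEncoder (dims are illustrative assumptions)
import torch

enc = TemporalEncoder(input_dim=40, hidden_dim=64)
visits = torch.randn(8, 12, 40)            # 8 patients, 12 visits, 40 features each
seq_states, last_state = enc(visits)       # (8, 12, 64) and (8, 64)
```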