import numpy as np
import pandas as pd
# Ensure datetime is parsed
df['datetime'] = pd.to_datetime(df['datetime'])
# ===== CORE TIME FEATURES =====
df['hour'] = df['datetime'].dt.hour
df['minute'] = df['datetime'].dt.minute
df['second'] = df['datetime'].dt.second
df['day_of_week'] = df['datetime'].dt.dayofweek
df['day_name'] = df['datetime'].dt.day_name()
df['day_of_month'] = df['datetime'].dt.day
df['day_of_year'] = df['datetime'].dt.dayofyear
df['week_of_year'] = df['datetime'].dt.isocalendar().week.astype(int)
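# Hour and weekday are cyclical, so sine/cosine encodings often help models;
# a minimal sketch (the *_sin/*_cos column names are assumptions, not from the original):
df['hour_sin'] = np.sin(2 * np.pi * df['hour'] / 24)
df['hour_cos'] = np.cos(2 * np.pi * df['hour'] / 24)
df['dow_sin'] = np.sin(2 * np.pi * df['day_of_week'] / 7)
df['dow_cos'] = np.cos(2 * np.pi * df['day_of_week'] / 7)
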
import os
# Define the folder and file structure
structure = {
    "smart-chatbot": {
        "backend": {
            "__init__.py": "",
            "main.py": "# FastAPI application entry point\n",
            "models.py": "# SQLAlchemy models\n",
            "schemas.py": "# Pydantic schemas\n",
            "database.py": "# Database configuration\n",
            "chatbot": {
                "__init__.py": "",
                "chain.py": "# LangChain conversation chain\n",
            },
        },
    },
}

# Walk the nested dict: dicts become folders, strings become file contents
def create_structure(base, tree):
    for name, content in tree.items():
        path = os.path.join(base, name)
        if isinstance(content, dict):
            os.makedirs(path, exist_ok=True)
            create_structure(path, content)  # Recurse into subfolders
        else:
            with open(path, "w") as f:
                f.write(content)             # Create the stub file

create_structure(".", structure)

import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.seasonal import seasonal_decompose
# Sample time series data
# Replace this with your own time series
date_range = pd.date_range(start='2020-01-01', periods=36, freq='M')
data = pd.Series([i + (i % 12) * 2 for i in range(36)], index=date_range)
# Perform seasonal decomposition
result = seasonal_decompose(data, model='additive', period=12)
# Plot the decomposition
result.plot()
plt.tight_layout()
plt.show()
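# The fitted DecomposeResult also exposes each component as a Series;
# a short sketch of pulling them out for further analysis:
trend = result.trend.dropna()      # NaN at the edges from the centered moving average
seasonal = result.seasonal
residual = result.resid.dropna()
print(seasonal.head(12))           # One full yearly cycle of the seasonal pattern
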
from sklearn.feature_selection import VarianceThreshold
import pandas as pd
# Load your dataset
df = pd.read_csv("your_data.csv") # Replace with actual path
# Drop non-numeric columns if needed
df_numeric = df.select_dtypes(include='number')
# Apply Variance Threshold
selector = VarianceThreshold(threshold=0.01) # Adjust threshold as needed
selected_array = selector.fit_transform(df_numeric)
# Get selected feature names
selected_features = df_numeric.columns[selector.get_support()]
df_selected = pd.DataFrame(selected_array, columns=selected_features)
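# To pick a sensible threshold, inspect the per-feature variances first;
# a quick sketch using the fitted selector's variances_ attribute:
variances = pd.Series(selector.variances_, index=df_numeric.columns)
print(variances.sort_values())  # Features near zero variance are candidates to drop
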
import pandas as pd
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.preprocessing import StandardScaler
# Load and preprocess
df = pd.read_csv("your_data.csv")
df_numeric = df.select_dtypes(include='number').dropna()
# Optional: scale features for stability
scaler = StandardScaler()
X_scaled = scaler.fit_transform(df_numeric)
# Compute VIF
vif_data = pd.DataFrame()
vif_data["Feature"] = df_numeric.columns
vif_data["VIF"] = [variance_inflaimport pandas as pd
import pandas as pd
from sklearn.feature_selection import SelectKBest, f_classif
# Load your dataset
df = pd.read_csv("your_data.csv")
# Separate features and target
X = df.drop("target_column", axis=1) # Replace with your actual target column
y = df["target_column"]
# Keep only numeric features
X_numeric = X.select_dtypes(include='number')
# Apply ANOVA F-test
selector = SelectKBest(score_func=f_classif, k='all') # Use 'all' to score all features
selector.fit(X_numeric, y)
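# The fitted selector exposes per-feature F-statistics and p-values;
# a short sketch ranking features by score:
anova_scores = pd.DataFrame({"Feature": X_numeric.columns,
                             "F_score": selector.scores_,
                             "p_value": selector.pvalues_})
print(anova_scores.sort_values("F_score", ascending=False))
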
import pandas as pd
from datetime import datetime, timedelta
import numpy as np
from sklearn.preprocessing import MinMaxScaler
# ===============================
# 1. DATE RANGE GENERATION
# ===============================
today = pd.to_datetime(datetime.now().date())
end_date = today + timedelta(days=30)
# Generate DatetimeIndex with hourly frequency
date_range = pd.date_range(start=today, end=end_date, freq='H')
# Create base DataFrame
forecast_next_30_days_df = pd.DataFrame({'datetime': date_range})
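# A hedged sketch of turning the hourly index into scaled model inputs with the
# imported MinMaxScaler (the 'hour' feature and column names are assumptions):
forecast_next_30_days_df['hour'] = forecast_next_30_days_df['datetime'].dt.hour
scaler = MinMaxScaler()
forecast_next_30_days_df['hour_scaled'] = scaler.fit_transform(
    forecast_next_30_days_df[['hour']])
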
# ==============================================
# Chi-Square Test Feature Selection with Visualization
# ==============================================
# Select features with:
# - High Chi2 score
# - Low p-value (typically < 0.05)
import pandas as pd
import numpy as np
from sklearn.feature_selection import SelectKBest, chi2
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler
# -------------------------------
# STEP 1: Prepare Data
# -------------------------------
df = pd.read_csv("your_data.csv")  # Replace with actual path
X = df.drop("target_column", axis=1).select_dtypes(include='number')
y = df["target_column"]
# chi2 requires non-negative inputs, so scale features to [0, 1]
X_scaled = MinMaxScaler().fit_transform(X)
# STEP 2: Score features
selector = SelectKBest(score_func=chi2, k='all')
selector.fit(X_scaled, y)
chi2_scores = pd.DataFrame({"Feature": X.columns,
                            "Chi2": selector.scores_,
                            "p_value": selector.pvalues_})
# STEP 3: Visualize scores
sns.barplot(data=chi2_scores.sort_values("Chi2", ascending=False),
            x="Chi2", y="Feature")
plt.tight_layout()
plt.show()
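# Applying the selection criteria from the header comment; a short sketch:
selected = chi2_scores.loc[chi2_scores["p_value"] < 0.05, "Feature"]
print(selected.tolist())  # Features with a statistically significant association
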
import logging
import os
from datetime import datetime
# ================================================================
# LOGGER UTILITY
# ================================================================
def get_logger(logger_name: str,
               log_dir: str = "logs",
               level: int = logging.INFO) -> logging.Logger:
    """
    Returns a configured logger instance.

    Parameters
    ----------
    logger_name : str
        Name of the logger.
    log_dir : str
        Directory where log files are written (created if missing).
    level : int
        Logging level, e.g. logging.INFO.
    """
    os.makedirs(log_dir, exist_ok=True)
    log_file = os.path.join(log_dir, f"{datetime.now():%Y_%m_%d}.log")
    logger = logging.getLogger(logger_name)
    logger.setLevel(level)
    if not logger.handlers:  # Avoid attaching duplicate handlers on repeated calls
        handler = logging.FileHandler(log_file)
        handler.setFormatter(logging.Formatter(
            "[%(asctime)s] %(name)s - %(levelname)s - %(message)s"))
        logger.addHandler(handler)
    return logger
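# Example usage of the logger helper (names are illustrative):
log = get_logger("training_pipeline")
log.info("Pipeline started")
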
import sys
from datetime import datetime
class ProjectException(Exception):
    """
    Custom exception class for handling project-level errors across
    ML/DL pipelines. Captures the full traceback and provides a clear,
    structured error message format.

    Usage:
        raise ProjectException(e, sys)
    """

    def __init__(self, error_message: Exception, error_details: sys):
        """
        Creates a formatted exception containing contextual traceback information.
        """
        super().__init__(error_message)
        _, _, exc_tb = error_details.exc_info()
        self.file_name = exc_tb.tb_frame.f_code.co_filename if exc_tb else "<unknown>"
        self.line_number = exc_tb.tb_lineno if exc_tb else -1
        self.error_message = str(error_message)

    def __str__(self):
        return (f"Error occurred in script [{self.file_name}] "
                f"at line [{self.line_number}]: {self.error_message}")
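# Example: wrapping a failure so the traceback context is preserved
# (the division error is just an illustration):
try:
    result = 1 / 0
except Exception as e:
    raise ProjectException(e, sys)
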
.simular-disabled {
    background-color: #ffffff;  /* white background */
    color: #000;                /* normal text color */
    border-width: 1px;
    border-style: dashed;       /* <<< instead of dotted */
    border-color: #e0e0e0;      /* very light gray */
    pointer-events: none;
    opacity: 1;
    cursor: default;
    padding: 4px 8px;           /* same visual breathing room as APEX */
}
function desabilitaCampos(ids) {
    ids.forEach(function(id) {
        var el = document.getElementById(id);
        if (el) {
            // Assumed: pairs with the .simular-disabled class defined above
            el.classList.add('simular-disabled');
            el.disabled = true;
        }
    });
}

npx playwright test

Always prioritize using the following groups:
- Email
- Communication
- Coding
- Shopping
- Social Media
- Multimedia
Prioritize the groups listed above: whenever a tab fits one of them, place it there. If several open tabs fit none of the existing groups but share an obvious theme, feel free to create an additional group for them.
The following are explicit instructions on how to organize the tabs:

npx playwright -V

<!-- Dynamically toggling the mask-image property on this div causes flickering -->
<div></div>
<style>
div {
    mask-image: hoge;                   /* Keep the mask-image property declared statically, */
    mask-size: calc(infinity + 1vmax);  /* and avoid the flicker by starting from a huge mask-size */
}
</style>

/**
* @param {number[]} nums
* @param {number} k
* @return {boolean}
*/
var kLengthApart = function(nums, k) {
    // Keep track of the index of the last '1' we saw
    let lastOneIndex = -1;
    // Loop through the array
    for (let i = 0; i < nums.length; i++) {
        // If we find a '1'
        if (nums[i] === 1) {
            // Case 1: If this is not the first '1' we've seen
            if (lastOneIndex !== -1) {
                // Check the distance between this '1' and the previous one
                if (i - lastOneIndex - 1 < k) {
                    return false; // Fewer than k zeros between them
                }
            }
            // Remember where this '1' was for the next comparison
            lastOneIndex = i;
        }
    }
    // Every pair of adjacent 1's is at least k places apart
    return true;
};