Commit

Added all files

Ryan-PG committed Nov 29, 2024
0 parents commit ce3a9e4
Showing 45 changed files with 2,566 additions and 0 deletions.
2,035 changes: 2,035 additions & 0 deletions Codes/Colab.Codes/transformers_example.ipynb

Large diffs are not rendered by default.
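Nothing below is taken from that notebook, since its diff is not rendered here. As a purely hypothetical sketch of what a minimal Hugging Face transformers example often looks like (the pipeline API is real; the task and strings are illustrative only, and the actual notebook may differ entirely):

from transformers import pipeline

# Downloads a small pretrained model on first use, then classifies the input text.
classifier = pipeline("sentiment-analysis")
print(classifier("Transformers pipelines make inference a one-liner."))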

62 changes: 62 additions & 0 deletions Codes/cancer_app.py
@@ -0,0 +1,62 @@
import streamlit as st
import pandas as pd
import numpy as np
from sklearn.datasets import load_breast_cancer

# Title
st.title("Breast Cancer Dataset Explorer")

# Load the dataset
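# @st.cache_data memoizes the return value across Streamlit reruns, so the
# DataFrame is built once per session rather than on every widget interaction.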
@st.cache_data
def load_data():
    cancer_data = load_breast_cancer()
    df = pd.DataFrame(cancer_data.data, columns=cancer_data.feature_names)
    df['target'] = cancer_data.target
    df['target'] = df['target'].map({0: 'Malignant', 1: 'Benign'})
    return df, cancer_data

# Load data
df, cancer_data = load_data()

# Display dataset
st.write("### Breast Cancer Dataset")
st.dataframe(df)

# Dataset insights
st.write("### Dataset Information")
st.write("Number of rows:", df.shape[0])
st.write("Number of columns:", df.shape[1])

# Summary statistics
st.write("### Summary Statistics")
st.write(df.describe())

# Sidebar options
st.sidebar.header("Filter Options")
selected_target = st.sidebar.multiselect("Select Diagnosis (Target)", df['target'].unique(), default=df['target'].unique())

# Filter dataset
filtered_df = df[df['target'].isin(selected_target)]

# Display filtered dataset
st.write("### Filtered Dataset")
st.dataframe(filtered_df)

# Visualization options
st.sidebar.header("Visualization Options")
chart_type = st.sidebar.selectbox("Select Chart Type", ["Scatter Plot", "Histogram"])

if chart_type == "Scatter Plot":
    st.write("### Scatter Plot")
    x_axis = st.sidebar.selectbox("X-Axis", cancer_data.feature_names)
    y_axis = st.sidebar.selectbox("Y-Axis", cancer_data.feature_names)
    st.write(f"Scatter plot: {x_axis} vs {y_axis}")
    # Plot the selected feature pair against each other, not two columns against the row index.
    st.scatter_chart(filtered_df, x=x_axis, y=y_axis)
elif chart_type == "Histogram":
    st.write("### Histogram")
    hist_column = st.sidebar.selectbox("Select Column for Histogram", cancer_data.feature_names)
    st.write(f"Histogram of {hist_column}")
    # Bin the values first; st.bar_chart on raw values would plot them against the row index.
    counts, bin_edges = np.histogram(filtered_df[hist_column], bins=20)
    hist_df = pd.DataFrame({"count": counts}, index=bin_edges[:-1].round(2))
    st.bar_chart(hist_df)

# Display dataset target classes
st.write("### Target Classes")
st.write({0: "Malignant", 1: "Benign"})
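Assuming streamlit, pandas, numpy, and scikit-learn are installed, the app launches with the standard Streamlit runner:

streamlit run Codes/cancer_app.py

Streamlit serves it locally (port 8501 by default) and reruns the whole script on every widget change, which is what makes the @st.cache_data decorator above worthwhile.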
217 changes: 217 additions & 0 deletions Codes/differences-between-python-packages.ipynb
@@ -0,0 +1,217 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### TensorFlow and Keras - Handwritten Digit Classification Using Neural Networks"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import tensorflow as tf\n",
"from tensorflow.keras.models import Sequential\n",
"from tensorflow.keras.layers import Dense, Flatten\n",
"\n",
"(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\n",
"x_train, x_test = x_train / 255.0, x_test / 255.0 # نرمال‌سازی داده‌ها\n",
"\n",
"model = Sequential([\n",
" Flatten(input_shape=(28, 28)),\n",
" Dense(128, activation='relu'),\n",
" Dense(10, activation='softmax')\n",
"])\n",
"\n",
"model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n",
"model.fit(x_train, y_train, epochs=5, validation_data=(x_test, y_test))\n",
"\n",
"test_loss, test_acc = model.evaluate(x_test, y_test)\n",
"print(f'Test Accuracy: {test_acc}')"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### PyTorch - Same handwritten digit classification model with PyTorch"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"import torch.nn as nn\n",
"import torch.optim as optim\n",
"from torchvision import datasets, transforms\n",
"\n",
"# بارگذاری و تبدیل داده‌ها\n",
"transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))])\n",
"train_set = datasets.MNIST(root='./data', train=True, download=True, transform=transform)\n",
"test_set = datasets.MNIST(root='./data', train=False, download=True, transform=transform)\n",
"train_loader = torch.utils.data.DataLoader(train_set, batch_size=64, shuffle=True)\n",
"test_loader = torch.utils.data.DataLoader(test_set, batch_size=64, shuffle=False)\n",
"\n",
"class NeuralNet(nn.Module):\n",
" def __init__(self):\n",
" super(NeuralNet, self).__init__()\n",
" self.fc1 = nn.Linear(28 * 28, 128)\n",
" self.fc2 = nn.Linear(128, 10)\n",
" \n",
" def forward(self, x):\n",
" x = x.view(-1, 28 * 28)\n",
" x = torch.relu(self.fc1(x))\n",
" return torch.log_softmax(self.fc2(x), dim=1)\n",
"\n",
"model = NeuralNet()\n",
"criterion = nn.NLLLoss()\n",
"optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
"\n",
"for epoch in range(5):\n",
" for images, labels in train_loader:\n",
" optimizer.zero_grad()\n",
" output = model(images)\n",
" loss = criterion(output, labels)\n",
" loss.backward()\n",
" optimizer.step()\n",
" print(f\"Epoch {epoch+1} completed\")\n",
"\n",
"correct = 0\n",
"with torch.no_grad():\n",
" for images, labels in test_loader:\n",
" output = model(images)\n",
" _, predicted = torch.max(output, 1)\n",
" correct += (predicted == labels).sum().item()\n",
"print(f'Test Accuracy: {correct / len(test_set)}')\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### Scikit-Learn - Flower Classification Using SVM Model"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from sklearn import datasets\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.svm import SVC\n",
"from sklearn.metrics import accuracy_score\n",
"\n",
"iris = datasets.load_iris()\n",
"X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.3, random_state=42)\n",
"\n",
"model = SVC(kernel='linear')\n",
"model.fit(X_train, y_train)\n",
"\n",
"y_pred = model.predict(X_test)\n",
"print(f'Accuracy: {accuracy_score(y_test, y_pred)}')\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### MXNet - Handwritten Digit Recognition with Neural Network"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import mxnet as mx\n",
"from mxnet import autograd, gluon, nd\n",
"from mxnet.gluon import nn, Trainer\n",
"from mxnet.gluon.data.vision import datasets, transforms\n",
"\n",
"# بارگذاری و تبدیل داده‌ها\n",
"transformer = transforms.Compose([transforms.ToTensor(), transforms.Normalize(0.13, 0.31)])\n",
"train_data = datasets.MNIST(train=True).transform_first(transformer)\n",
"test_data = datasets.MNIST(train=False).transform_first(transformer)\n",
"\n",
"train_loader = mx.gluon.data.DataLoader(train_data, batch_size=64, shuffle=True)\n",
"test_loader = mx.gluon.data.DataLoader(test_data, batch_size=64, shuffle=False)\n",
"\n",
"# ساختار مدل\n",
"net = nn.Sequential()\n",
"net.add(nn.Dense(128, activation='relu'))\n",
"net.add(nn.Dense(10))\n",
"net.initialize(mx.init.Xavier())\n",
"\n",
"# آموزش مدل\n",
"trainer = Trainer(net.collect_params(), 'adam', {'learning_rate': 0.001})\n",
"loss_fn = gluon.loss.SoftmaxCrossEntropyLoss()\n",
"\n",
"for epoch in range(5):\n",
" cumulative_loss = 0\n",
" for data, label in train_loader:\n",
" with autograd.record():\n",
" output = net(data)\n",
" loss = loss_fn(output, label)\n",
" loss.backward()\n",
" trainer.step(batch_size=64)\n",
" cumulative_loss += loss.mean().asscalar()\n",
" print(f'Epoch {epoch+1}, Loss: {cumulative_loss / len(train_loader)}')\n",
"\n",
"# ارزیابی مدل\n",
"acc = mx.metric.Accuracy()\n",
"for data, label in test_loader:\n",
" output = net(data)\n",
" predictions = output.argmax(axis=1)\n",
" acc.update(preds=predictions, labels=label)\n",
"print(f'Test Accuracy: {acc.get()[1]}')\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"#### XGBoost - Flower Classification with XGBoost"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import xgboost as xgb\n",
"from sklearn.datasets import load_iris\n",
"from sklearn.model_selection import train_test_split\n",
"from sklearn.metrics import accuracy_score\n",
"\n",
"# بارگذاری مجموعه داده Iris\n",
"iris = load_iris()\n",
"X_train, X_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.3, random_state=42)\n",
"\n",
"# ساخت و آموزش مدل XGBoost\n",
"model = xgb.XGBClassifier(use_label_encoder=False, eval_metric='mlogloss')\n",
"model.fit(X_train, y_train)\n",
"\n",
"# پیش‌بینی و ارزیابی\n",
"y_pred = model.predict(X_test)\n",
"print(f'Accuracy: {accuracy_score(y_test, y_pred)}')\n"
]
}
],
"metadata": {
"language_info": {
"name": "python"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
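Two version caveats on the notebook above, since all five snippets target the same pair of tasks (MNIST digits and Iris). First, XGBoost removed its built-in label encoder in the 1.6 release, and recent versions ignore the use_label_encoder argument entirely, so on a modern install the constructor can simply be:

model = xgb.XGBClassifier(eval_metric='mlogloss')  # use_label_encoder is no longer needed in recent XGBoost

Second, Apache MXNet was retired to the Apache Attic in 2023, so the Gluon example may need an older Python environment to run as written.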
43 changes: 43 additions & 0 deletions Codes/g4f/g4f_streamlit.py
@@ -0,0 +1,43 @@
from openai import OpenAI
import streamlit as st
from langdetect import detect

model_options = [
    "gpt-3.5-turbo",
    "gpt-4",
    "gpt-4-turbo",
    "gpt-4o",
    "gpt-4o-mini",
]
selected_model = st.selectbox("Select Model", model_options, index=3)

inp = st.text_input("Prompt")
but = st.button("Send")

if but and inp != "":
    # Detect the prompt language so Persian responses render right-to-left.
    try:
        lang = detect(inp)
    except Exception:
        lang = "en"

    text_direction = "rtl" if lang == "fa" else "ltr"

    client = OpenAI(
        api_key="NotNeededAnyAPIKey",  # placeholder; the local g4f server does not validate keys
        base_url="http://localhost:1337/v1",
    )

    response = client.chat.completions.create(
        model=selected_model,
        messages=[
            {
                "role": "user",
                "content": inp
            }
        ],
    )

    st.markdown(
        f'<div style="direction: {text_direction};">{response.choices[0].message.content}</div>',
        unsafe_allow_html=True
    )
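A note on running this script: it assumes a local g4f "interference" server, an OpenAI-compatible endpoint that g4f serves on port 1337 by default, which is why the api_key is a throwaway placeholder. Assuming the packages from the requirements file below are installed (pip install -r Codes/g4f/requirements.txt), the server is typically started from g4f's CLI (for example, g4f api; the exact entry point has varied across g4f releases, so check the project's README), after which the UI launches with:

streamlit run Codes/g4f/g4f_streamlit.py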
4 changes: 4 additions & 0 deletions Codes/g4f/requirements.txt
@@ -0,0 +1,4 @@
g4f[all]
openai
streamlit
langdetect
55 changes: 55 additions & 0 deletions Codes/iris_app.py
@@ -0,0 +1,55 @@
import streamlit as st
from sklearn.datasets import load_iris
import pandas as pd

# Load the Iris dataset
iris = load_iris()
iris_data = pd.DataFrame(
    iris.data, columns=iris.feature_names
)
iris_data['target'] = iris.target
iris_data['target_name'] = iris_data['target'].map(
    dict(enumerate(iris.target_names))
)

# Streamlit app
st.title("Iris Dataset Viewer")

# Add a description
st.write("""
This app allows you to explore the classic Iris dataset.
Use the controls to customize your view.
""")

# Show the dataset as a table
st.subheader("Iris Dataset")
if st.checkbox("Show dataset"):
    st.dataframe(iris_data)

# Filter by target species
st.subheader("Filter by Species")
species = st.multiselect(
    "Select species to filter:",
    options=iris.target_names,
    default=iris.target_names
)

filtered_data = iris_data[iris_data['target_name'].isin(species)]

# Show filtered data
st.write(f"Showing {len(filtered_data)} rows for the selected species.")
st.dataframe(filtered_data)

# Plot the data
st.subheader("Pairplot of Features")
if st.checkbox("Show pairplot (requires seaborn)"):
    # Imported lazily so the app still runs without seaborn/matplotlib installed,
    # as long as this checkbox stays unticked.
    import seaborn as sns
    import matplotlib.pyplot as plt

    sns.set_theme(style="ticks")
    pairplot_fig = sns.pairplot(
        filtered_data,
        vars=iris.feature_names,
        hue='target_name'
    )
    st.pyplot(pairplot_fig)
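Like the other Streamlit demo, this one launches with streamlit run Codes/iris_app.py. Note that seaborn and matplotlib are needed only when the pairplot checkbox is ticked, and neither appears in the requirements file above (which covers only the g4f demo), so they may need to be installed separately.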
(The remaining file diffs in this commit are not rendered in this view.)