Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Perforated AI OGBN Example #9926

Open
wants to merge 38 commits into
base: master
Choose a base branch
from
Open
Changes from 1 commit
Commits
Show all changes
38 commits
Select commit Hold shift + click to select a range
1157d80
checking in graphSage example original file from the branch I was usi…
RorryB Dec 17, 2024
7446d41
removed PAI and _original from first PR. moved _scheduler to replace…
RorryB Dec 18, 2024
6f66d89
rebase with master
RorryB Dec 19, 2024
df7fa07
updated changelog
RorryB Jan 8, 2025
b5cebd9
Merge branch 'master' into master
RorryB Jan 8, 2025
b856e0c
added perforated ai ogbn example
RorryB Jan 8, 2025
0496528
updated changelog and intro comment
RorryB Jan 8, 2025
2b63eb7
Merge branch 'pyg-team:master' into master
RorryB Jan 16, 2025
437170e
updated formatting
RorryB Jan 16, 2025
e877f58
added perforated ai ogbn example
RorryB Jan 8, 2025
e4a7825
updated changelog and intro comment
RorryB Jan 8, 2025
e6da90f
Merge branch 'paiExample' of https://github.com/PerforatedAI/pytorch_…
RorryB Jan 16, 2025
d8ce9c8
Merge branch 'pyg-team:master' into master
RorryB Jan 17, 2025
60e3444
formatting updates
RorryB Jan 19, 2025
d77f5b3
updated comment
RorryB Jan 19, 2025
637628a
Merge branch 'pyg-team:master' into master
RorryB Jan 19, 2025
a291449
added perforated ai ogbn example
RorryB Jan 8, 2025
4f7bc6d
updated changelog and intro comment
RorryB Jan 8, 2025
38dd700
added perforated ai ogbn example
RorryB Jan 8, 2025
5d0253c
Merge branch 'paiExample' of https://github.com/PerforatedAI/pytorch_…
RorryB Jan 19, 2025
3b25b56
formatting 1
RorryB Jan 19, 2025
0415e96
formatting 2
RorryB Jan 19, 2025
59c7d41
formatting 3
RorryB Jan 19, 2025
ac56feb
formatting 4
RorryB Jan 19, 2025
e13c37a
formatting 5
RorryB Jan 19, 2025
97e711e
formatting 6
RorryB Jan 19, 2025
adee77d
formatting 6
RorryB Jan 19, 2025
e94442c
formatting 7
RorryB Jan 19, 2025
85a8aae
formatting 8
RorryB Jan 19, 2025
d3931e9
reverting README
RorryB Jan 19, 2025
b20e47d
formatting 9
RorryB Jan 19, 2025
20a5481
formatting 10
RorryB Jan 19, 2025
c4935a8
Merge branch 'pyg-team:master' into paiExample
RorryB Feb 3, 2025
cd66d5d
updated examples README
RorryB Feb 6, 2025
f4ee7ea
Merge branch 'pyg-team:master' into paiExample
RorryB Feb 6, 2025
890f00d
Merge branch 'master' into paiExample
RorryB Feb 12, 2025
12d372d
Merge branch 'master' into paiExample
RorryB Feb 14, 2025
9e9a52d
Merge branch 'master' into paiExample
RorryB Feb 21, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
formatting updates
RorryB committed Jan 19, 2025
commit 60e3444b54b5c986a9e615a94b1b28106f7cf412
73 changes: 40 additions & 33 deletions examples/ogbn_train_perforatedai.py
Original file line number Diff line number Diff line change
@@ -17,10 +17,6 @@
from perforatedai import pb_globals as PBG
from perforatedai import pb_models as PBM
from perforatedai import pb_utils as PBU




'''
PAI README:

@@ -51,28 +47,27 @@

Results:

Test Accuracy: 78.29%
Test Accuracy: 78.37%

'''


# Set Perforated Backpropagation settings for this training run

# When to switch between Dendrite learning and neuron learning is determined by when the history of validation score or correlation scores shows those scores are no longer improving
PBG.switchMode = PBG.doingHistory
PBG.switchMode = PBG.doingHistory
# When calculating that just look at the current score, not a recent average
PBG.historyLookback = 1
# How many normal epochs to wait for before switching modes.
PBG.nEpochsToSwitch = 25
# How many normal epochs to wait for before switching modes.
PBG.nEpochsToSwitch = 25
PBG.pEpochsToSwitch = 15 # Same as above for Dendrite epochs
# The default shape of input tensors will be [batch size, number of neurons in the layer]
PBG.inputDimensions = [-1, 0]
# This allows Dendrites to train as long as they keep improving rather than capping
# Dendrite training cycles to only be as long as the first neuron training cycle.
PBG.capAtN = False
PBG.capAtN = False
# Stop the run after 4 dendrites are created
PBG.maxDendrites = 4
# If a set of Dendrites does not improve the system try it 3 times before giving up
# If a set of Dendrites does not improve the system try it 2 times before giving up
PBG.maxDendriteTries = 2
# Make sure correlation scores improve from epoch to epoch by at least 25% and a raw value of 1e-4 to conclude that the correlation scores have gone up.
PBG.pbImprovementThreshold = 0.25
@@ -108,8 +103,7 @@
help='number of neighbors in each layer')
parser.add_argument('--hidden_channels', type=int, default=256)
# This can be set to 0 to run this code without Perforated Backpropagation happening.
parser.add_argument('--doingPB', type=int, default=1,
help='doing PB')
parser.add_argument('--doingPB', type=int, default=1, help='doing PB')
parser.add_argument('--lr', type=float, default=0.003)
parser.add_argument('--wd', type=float, default=0.0)
parser.add_argument('--dropout', type=float, default=0.5)
@@ -255,24 +249,32 @@ def test(loader: NeighborLoader) -> float:
# This initializes the Perforated Backpropagation Tracker object which organizes
communication between each individual Dendrite converted module within a full network
PBG.pbTracker.initialize(
doingPB = args.doingPB, #This can be set to false if you want to do just normal training
saveName=args.saveName, # Change the save name for different parameter runs
maximizingScore=True, # True for maximizing validation score, false for minimizing validation loss
makingGraphs=True) # True if you want graphs to be saved

doingPB=args.
doingPB, #This can be set to false if you want to do just normal training
saveName=args.
saveName, # Change the save name for different parameter runs
maximizingScore=
True, # True for maximizing validation score, false for minimizing validation loss
makingGraphs=True) # True if you want graphs to be saved
'''
# This can be added to pick up where it left off if something crashes.
print('pre loading')
model = PBU.loadSystem(model, args.saveName, 'latest')
print('loaded')

'''

# This change is required since the PBTracker object handles the scheduler
# All values are the same, just passed in as kwargs
PBG.pbTracker.setOptimizer(torch.optim.Adam)
PBG.pbTracker.setScheduler(torch.optim.lr_scheduler.ReduceLROnPlateau)
schedArgs = {'mode':'max', 'patience': 5}
optimArgs = {'params':model.parameters(),'lr':args.lr, 'weight_decay':args.wd}
optimizer, scheduler = PBG.pbTracker.setupOptimizer(model, optimArgs, schedArgs)

schedArgs = {'mode': 'max', 'patience': 5}
optimArgs = {
'params': model.parameters(),
'lr': args.lr,
'weight_decay': args.wd
}
optimizer, scheduler = PBG.pbTracker.setupOptimizer(model, optimArgs,
schedArgs)

print(f'Total time before training begins took '
f'{time.perf_counter() - wall_clock_start:.4f}s')
@@ -282,7 +284,6 @@ def test(loader: NeighborLoader) -> float:
train_times = []
inference_times = []
best_val = 0.

'''
# These lines can be used to run full test with a final model file
num_epochs = 0
@@ -314,7 +315,6 @@ def test(loader: NeighborLoader) -> float:
times.append(time.perf_counter() - train_start)
# Add the extra scores to be graphed
PBG.pbTracker.addExtraScore(train_acc, 'Train Accuracy')

'''
Add the validation score
This is the function that determines when to add new Dendrites for Dendrite training
@@ -325,19 +325,26 @@ def test(loader: NeighborLoader) -> float:
restructured - If a restructuring did happen
trainingComplete - if the tracker has determined that this is the final model to use
'''
model, improved, restructured, trainingComplete = PBG.pbTracker.addValidationScore(val_acc,
model,
args.saveName)
model, improved, restructured, trainingComplete = PBG.pbTracker.addValidationScore(
val_acc, model, args.saveName)
# Need to setup GPU settings of the new model
model = model.to(device)
# When training is complete break the loop of epochs
if(trainingComplete):
if (trainingComplete):
break
# If the network was restructured reinitialize the optimizer and scheduler for the new parameters
elif(restructured):
schedArgs = {'mode':'max', 'patience': 5} #Make sure this is lower than epochs to switch
optimArgs = {'params':model.parameters(),'lr':args.lr, 'weight_decay':args.wd}
optimizer, scheduler = PBG.pbTracker.setupOptimizer(model, optimArgs, schedArgs)
elif (restructured):
schedArgs = {
'mode': 'max',
'patience': 5
} #Make sure this is lower than epochs to switch
optimArgs = {
'params': model.parameters(),
'lr': args.lr,
'weight_decay': args.wd
}
optimizer, scheduler = PBG.pbTracker.setupOptimizer(
model, optimArgs, schedArgs)

print(f'Average Epoch Time on training: '
f'{torch.tensor(train_times).mean():.4f}s')