Fetch from upstream #2

Open · wants to merge 4 commits into main

69 changes: 40 additions & 29 deletions agents/inverter_policy.py
@@ -15,6 +15,7 @@
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


### Can move to utils.network if appropriate
class Net(nn.Module):
def __init__(self, n_bus, n_inverters, shared_hidden_layer_sizes, indiv_hidden_layer_sizes, n_input = 3):
super(Net, self).__init__()
@@ -44,8 +45,8 @@ def __init__(self, n_bus, n_inverters, shared_hidden_layer_sizes, indiv_hidden_layer_sizes, n_input = 3):
# # Set up non-linear network of Linear -> BatchNorm -> ReLU -> Dropout layers
# self.n_inverters = n_inverters
# layer_sizes = [4 * n_inverters] + shared_hidden_layer_sizes
# layers = reduce(operator.add,
# [[nn.Linear(a,b), nn.BatchNorm1d(b), nn.ReLU(), nn.Dropout(p=0.2)]
# for a,b in zip(layer_sizes[0:-1], layer_sizes[1:])])
# layers += [nn.Linear(layer_sizes[-1], 2 * n_inverters)]
# self.nn = nn.Sequential(*layers)
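Illustrative aside, not from this diff: the commented-out block above sketches a plain MLP trunk of Linear -> BatchNorm -> ReLU -> Dropout stages assembled with reduce. A minimal, self-contained version of that pattern, with all layer sizes treated as placeholders, could look like this:

import operator
from functools import reduce

import torch.nn as nn

def build_mlp(in_dim, hidden_sizes, out_dim, dropout=0.2):
    # Linear -> BatchNorm -> ReLU -> Dropout for each hidden layer, as in the commented block
    sizes = [in_dim] + list(hidden_sizes)
    layers = reduce(operator.add,
                    [[nn.Linear(a, b), nn.BatchNorm1d(b), nn.ReLU(), nn.Dropout(p=dropout)]
                     for a, b in zip(sizes[:-1], sizes[1:])], [])
    layers.append(nn.Linear(sizes[-1], out_dim))
    return nn.Sequential(*layers)

# e.g. build_mlp(4 * n_inverters, shared_hidden_layer_sizes, 2 * n_inverters)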
@@ -80,11 +81,11 @@ def __init__(self, network, memory, lr, lam = 10, scaler = 1000, **env_params):

n_bus = env_params['n_bus']
self.gen_idx = env_params['gen_idx']
self.other_idx = [i for i in range(self.n_bus) if i not in self.gen_idx]
self.other_idx = [i for i in range(n_bus) if i not in self.gen_idx]

H = env_params['H']
R = H[:, :self.n_bus]
B = H[:, self.n_bus:]
R = H[:, :n_bus]
B = H[:, n_bus:]
R_new = np.vstack([np.hstack([R[self.gen_idx][:, self.gen_idx],
R[self.gen_idx][:, self.other_idx]]),
np.hstack([R[self.other_idx][:, self.gen_idx],
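Illustrative aside, not from this diff: the block construction above, truncated in this view, permutes the rows and columns of the sensitivity matrix so that controllable (generator) buses come first. Assuming R is square over all buses, an equivalent single index permutation is:

import numpy as np

def reorder_by_gen(R, gen_idx, other_idx):
    order = list(gen_idx) + list(other_idx)   # controllable buses first, then the rest
    return R[np.ix_(order, order)]            # equals the nested vstack/hstack of the four blocks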
@@ -129,7 +130,7 @@ def __init__(self, network, memory, lr, lam = 10, scaler = 1000, **env_params):
P_av = cp.Parameter(len(self.gen_idx))

# Voltage: Apply to All Buses
z = cp.hstack([self.P, self.P_nc, self.Q, self.Q_nc]) # z: (70, )
z = cp.hstack([P, P_nc, Q, Q_nc]) # z: (70, )
constraints = [self.V_lower - self.V0 <= H_new@z,
H_new@z <= self.V_upper - self.V0]
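Illustrative aside, not from this diff: the constraint above keeps the linearized voltage deviation H_new @ z inside the allowed band. A standalone cvxpy sketch of that constraint set, with all dimensions and bounds as placeholders rather than repo values:

import cvxpy as cp
import numpy as np

n_bus, n_gen = 35, 21                        # placeholder sizes; z then has length 2 * n_bus = 70
n_other = n_bus - n_gen
H_new = np.zeros((n_bus, 2 * n_bus))         # reordered [R | B] sensitivity matrix (placeholder)
V0 = np.ones(n_bus)
V_lower, V_upper = 0.95 * np.ones(n_bus), 1.05 * np.ones(n_bus)

P, Q = cp.Variable(n_gen), cp.Variable(n_gen)               # controllable injections (decision variables)
P_nc, Q_nc = cp.Parameter(n_other), cp.Parameter(n_other)   # non-controllable injections (problem data)

z = cp.hstack([P, P_nc, Q, Q_nc])            # stacked injections, matching the ordering above
constraints = [V_lower - V0 <= H_new @ z,
               H_new @ z <= V_upper - V0]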

@@ -152,55 +153,66 @@ def __init__(self, network, memory, lr, lam = 10, scaler = 1000, **env_params):
def forward(self, state, Sbus, P_av, inference_flag = True):
'''
Input:
state: [dV(k-1), P_nc, Q_nc]
where,
Z_nc = Z - Z0
May be batched (n, dim) or a single sample (dim);
Output:
P, Q (with respect to the reference point)
'''
## Get information for non-controllable loads
P_nc = Sbus.real[self.other_idx] / self.scaler
Q_nc = Sbus.imag[self.other_idx] / self.scaler
self.P_nc.value = P_nc
self.Q_nc.value = Q_nc
self.P_av.value = P_av

if P_tilde.ndimension() == 1:
P_tilde, Q_tilde = self.nn(state.to(DEVICE)) # n x n_inverter
P_all = Sbus.real /self.scaler
Q_all = Sbus.imag /self.scaler
if len(Sbus.shape)==1:
P_nc = Sbus.real[self.other_idx] / self.scaler
Q_nc = Sbus.imag[self.other_idx] / self.scaler
elif len(Sbus.shape)==2:
P_nc = Sbus.real[:, self.other_idx] / self.scaler
Q_nc = Sbus.imag[:, self.other_idx] / self.scaler
else:
P_tilde, Q_tilde = self.nn(state.to(DEVICE)) # n x n_inverter
print("Well, not expected to happen")

P_tilde, Q_tilde = self.nn(state.to(DEVICE)) # n x n_inverter

## During inference, if the action is already feasible, there is no need to project
if inference_flag:
if self.is_feasible(P_tilde.detach().clone()/self.scaler, Q_tilde.detach().clone()/self.scaler,
P_nc, Q_nc, P_av):
return P_tilde/self.scaler, Q_tilde/self.scaler
P_tilde = P_tilde.squeeze()
Q_tilde = Q_tilde.squeeze()
if self.is_feasible(P_tilde.detach().clone()/self.scaler,
Q_tilde.detach().clone()/self.scaler,
P_nc, Q_nc, P_av):
P_all[self.gen_idx] = P_tilde.detach().cpu().numpy() / self.scaler
Q_all[self.gen_idx] = Q_tilde.detach().cpu().numpy() / self.scaler
return P_all, Q_all
else:
try:
P, Q = self.proj_layer(P_tilde/self.scaler, Q_tilde/self.scaler,
torch.tensor(P_nc).float().to(DEVICE),
torch.tensor(Q_nc).float().to(DEVICE),
torch.tensor(P_av).float().to(DEVICE))
self.proj_count += 1
P_all[self.gen_idx] = P.detach().cpu().numpy()
Q_all[self.gen_idx] = Q.detach().cpu().numpy()
except: # The solver dies for some reason
P = torch.zeros_like(P_tilde)
Q = torch.zeros_like(Q_tilde)
return P, Q
P_all[self.gen_idx] = 0
Q_all[self.gen_idx] = 0
return P_all, Q_all
else:
#pdb.set_trace()
P, Q = self.proj_layer(P_tilde/self.scaler, Q_tilde/self.scaler,
torch.tensor(P_nc).float().to(DEVICE),
torch.tensor(Q_nc).float().to(DEVICE),
torch.tensor(P_av).float().to(DEVICE))
proj_loss = self.mse(P.detach(), P_tilde/self.scaler) \
+ self.mse(Q.detach(), Q_tilde/self.scaler)
return P, Q, proj_loss

def update(self, batch_size = 64, n_batch = 16):
for _ in range(n_batch):
state, Sbus, P_av = self.memory.sample_batch(batch_size = batch_size)
P, Q, proj_loss = self.forward(state, Sbus, P_av, inference_flag = False)
#pdb.set_trace()
curtail = self.ReLU(torch.tensor(P_av).to(DEVICE) - P[:, self.gen_idx])
curtail = self.ReLU(torch.tensor(P_av).to(DEVICE) - P)
loss = curtail.mean() + self.lam * proj_loss
print(f'curtail = {curtail.mean().item()}, proj_loss = {proj_loss.item()}')
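Illustrative aside, not from this diff: self.proj_layer is constructed elsewhere in the class. Assuming it is a differentiable projection built with cvxpylayers (an assumption, the PR does not show its construction), the wiring could look roughly like this, reusing the placeholder voltage band from the sketch above:

import cvxpy as cp
import numpy as np
from cvxpylayers.torch import CvxpyLayer

n_bus, n_gen = 35, 21                        # placeholder sizes
n_other = n_bus - n_gen
H_new = np.zeros((n_bus, 2 * n_bus))         # placeholder sensitivity matrix
V0, V_lower, V_upper = np.ones(n_bus), 0.95 * np.ones(n_bus), 1.05 * np.ones(n_bus)

P, Q = cp.Variable(n_gen), cp.Variable(n_gen)
P_tilde, Q_tilde = cp.Parameter(n_gen), cp.Parameter(n_gen)   # raw network outputs to be projected
P_nc, Q_nc = cp.Parameter(n_other), cp.Parameter(n_other)
P_av = cp.Parameter(n_gen)

z = cp.hstack([P, P_nc, Q, Q_nc])
constraints = [V_lower - V0 <= H_new @ z, H_new @ z <= V_upper - V0,
               P >= 0, P <= P_av]            # inverter rating constraint omitted for brevity
objective = cp.Minimize(cp.sum_squares(P - P_tilde) + cp.sum_squares(Q - Q_tilde))
proj_layer = CvxpyLayer(cp.Problem(objective, constraints),
                        parameters=[P_tilde, Q_tilde, P_nc, Q_nc, P_av],
                        variables=[P, Q])
# P_proj, Q_proj = proj_layer(p_tilde, q_tilde, p_nc, q_nc, p_av)   # torch tensors in, tensors out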

Expand All @@ -222,14 +234,13 @@ def is_feasible(self, P, Q, P_nc, Q_nc, P_av):
if torch.any(v < self.V_lower_torch -self.V0_torch - eps) | torch.any(v > self.V_upper_torch-self.V0_torch+eps):
return False

P = P[self.gen_idx] + self.P0_torch[self.gen_idx]
Q = Q[self.gen_idx] + self.Q0_torch[self.gen_idx]
P = P + self.P0_torch[self.gen_idx]
Q = Q + self.Q0_torch[self.gen_idx]
PQ = torch.stack([P, Q]) # (2, 21)
if torch.any(torch.norm(PQ, dim = 0) > self.S_rating_torch + eps):
return False

if torch.any(P < 0-eps) | torch.any(P > torch.tensor(P_av).to(DEVICE)+eps):
return False
else:
return True

return True
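Illustrative aside, not from this diff: condensed, the feasibility test above checks three things, with all bounds, offsets, and the tolerance treated as placeholders here: the linearized voltage deviation stays in its band, the apparent power stays under each inverter's rating, and the active power stays between zero and the available power.

import torch

def feasible(P, Q, P_av, P0, Q0, v_dev, v_lo, v_hi, S_rating, eps=1e-4):
    # 1) linearized voltage deviation inside the band
    if torch.any(v_dev < v_lo - eps) or torch.any(v_dev > v_hi + eps):
        return False
    # 2) apparent power within each inverter's rating
    S = torch.sqrt((P + P0) ** 2 + (Q + Q0) ** 2)
    if torch.any(S > S_rating + eps):
        return False
    # 3) active power between zero and the available power
    return bool(torch.all(P + P0 >= -eps) and torch.all(P + P0 <= P_av + eps))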