
Commit

…into main
ChrisZonghaoLi committed Oct 17, 2023
2 parents 7fc5c1d + c8f4e9d commit 14aa4f0
Showing 4 changed files with 27 additions and 24 deletions.
13 changes: 8 additions & 5 deletions README.md
@@ -4,18 +4,18 @@

---

This is the source code for the paper submitted to IEEE/ACM ICCAAD 2023.
This is the source code for the paper accepted to IEEE/ACM ICCAD 2023. The final accepted manuscript is available at https://www.zonghaoli.com/ml_analog_ic.html.

---

## Repo Structure

All files you are lookring for are placed inside the folder `/python`.
All files you are looking for are placed inside the folder `/python`.
- `ldo.py`: define the LDO1 environment (Gymnasium compatible).
- `ldo_folded_cascode.py`: define the LDO2 environment (Gymnasium compatible).
- `ckt_graphs.py`: define the graph info as well as specifications for LDO1 and LDO2.
- `dev_params.py`: used to extrac device parameters such as threshold voltage and transconductance of transistors, providing the observations for RL.
- `ddpg.py`: where DDPG algorithm is stored.
- `dev_params.py`: used to extract device parameters such as threshold voltage and transconductance of transistors, providing the observations for RL.
- `ddpg.py`: where the DDPG algorithm is stored.
- `models.py`: where various GNN models are stored.
- `main.py`: run the optimization for LDO1.
- `main2.py`: run the optimization for LDO2.
@@ -25,4 +25,7 @@ All files you are lookring for are placed inside the folder `/python`.
- `/simulations`: stores the SPICE files; this is where Ngspice runs.
- `ldo_rgcn_rl.ipynb`: a notebook that summarizes the work; it is outdated.


---
## Getting Started
You can follow the tutorial in `ldo_rgcn_rl.ipynb`; it is outdated, but the overall flow is the same.
To run the up-to-date version, simply execute `main.py` for LDO1 and `main2.py` for LDO2.
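Because both environments follow the Gymnasium API, you can also interact with them directly, without the full DDPG loop. The snippet below is only a minimal sketch under assumptions: the class name `LDOEnv` and its zero-argument constructor are placeholders, so check `ldo.py` for the actual class name and any required arguments.

```python
# Minimal sketch of driving the LDO1 environment directly.
# Assumptions (verify against ldo.py): the class name `LDOEnv` and the
# zero-argument constructor are placeholders; only the standard
# Gymnasium reset/step interface is relied on here.
from ldo import LDOEnv  # hypothetical name

env = LDOEnv()
obs, info = env.reset()
for _ in range(10):
    action = env.action_space.sample()  # random device-sizing action
    obs, reward, terminated, truncated, info = env.step(action)
    if terminated or truncated:
        obs, info = env.reset()
env.close()
```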
2 changes: 1 addition & 1 deletion python/main.py
@@ -105,7 +105,7 @@
save_name_actor = f"Actor_{CktGraph().__class__.__name__}_{date}_noise={noise_type}_reward={best_reward:.2f}_{GNN().__class__.__name__}_rew_eng={rew_eng}.pth"

model_weight_critic = agent.critic.state_dict()
save_name_critic = f"Critic_{CktGraph().__class__.__name__}_{date}_noise={noise_type}_reward={best_reward:2f}_{GNN().__class__.__name__}_rew_eng={rew_eng}.pth"
save_name_critic = f"Critic_{CktGraph().__class__.__name__}_{date}_noise={noise_type}_reward={best_reward:.2f}_{GNN().__class__.__name__}_rew_eng={rew_eng}.pth"

torch.save(model_weight_actor, PWD + "/saved_weights/" + save_name_actor)
torch.save(model_weight_critic, PWD + "/saved_weights/" + save_name_critic)
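For context on this one-character fix: in a Python format spec, `:2f` means fixed-point with a minimum field width of 2 and the default precision of 6, while `:.2f` rounds to two decimal places, matching the actor filename above. A quick standalone illustration (plain Python, not repo code):

```python
best_reward = -3.14159
print(f"reward={best_reward:2f}")   # reward=-3.141590  (width 2, default precision 6)
print(f"reward={best_reward:.2f}")  # reward=-3.14      (precision 2, as in the actor filename)
```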
2 changes: 1 addition & 1 deletion python/main2.py
@@ -96,7 +96,7 @@
save_name_actor = f"Actor_{CktGraph().__class__.__name__}_{date}_noise={noise_type}_reward={best_reward:.2f}_{GNN().__class__.__name__}_rew_eng={rew_eng}.pth"

model_weight_critic = agent.critic.state_dict()
save_name_critic = f"Critic_{CktGraph().__class__.__name__}_{date}_noise={noise_type}_reward={best_reward:2f}_{GNN().__class__.__name__}_rew_eng={rew_eng}.pth"
save_name_critic = f"Critic_{CktGraph().__class__.__name__}_{date}_noise={noise_type}_reward={best_reward:.2f}_{GNN().__class__.__name__}_rew_eng={rew_eng}.pth"

torch.save(model_weight_actor, PWD + "/saved_weights/" + save_name_actor)
torch.save(model_weight_critic, PWD + "/saved_weights/" + save_name_critic)
34 changes: 17 additions & 17 deletions python/models.py
@@ -271,10 +271,10 @@ def __init__(self, CktGraph):

self.in_channels = self.num_node_features
self.out_channels = self.action_dim
self.conv1 = Linear(self.in_channels, 32)
self.conv2 = Linear(32, 32)
self.conv3 = Linear(32, 16)
self.conv4 = Linear(16, 16)
self.mlp1 = Linear(self.in_channels, 32)
self.mlp2 = Linear(32, 32)
self.mlp3 = Linear(32, 16)
self.mlp4 = Linear(16, 16)
self.lin1 = LazyLinear(self.out_channels)

def forward(self, state):
@@ -287,10 +287,10 @@ def forward(self, state):
actions = torch.tensor(()).to(device)
for i in range(batch_size):
x = state[i]
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = F.relu(self.mlp1(x))
x = F.relu(self.mlp2(x))
x = F.relu(self.mlp3(x))
x = F.relu(self.mlp4(x))
x = self.lin1(torch.flatten(x))
x = torch.tanh(x).reshape(1, -1)
actions = torch.cat((actions, x), axis=0)
@@ -308,10 +308,10 @@ def __init__(self, CktGraph):

self.in_channels = self.num_node_features + self.action_dim
self.out_channels = 1
self.conv1 = Linear(self.in_channels, 32)
self.conv2 = Linear(32, 32)
self.conv3 = Linear(32, 16)
self.conv4 = Linear(16, 16)
self.mlp1 = Linear(self.in_channels, 32)
self.mlp2 = Linear(32, 32)
self.mlp3 = Linear(32, 16)
self.mlp4 = Linear(16, 16)
self.lin1 = LazyLinear(self.out_channels)

def forward(self, state, action):
@@ -325,11 +325,11 @@ def forward(self, state, action):
values = torch.tensor(()).to(device)
for i in range(batch_size):
x = data[i]
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = F.relu(self.mlp1(x))
x = F.relu(self.mlp2(x))
x = F.relu(self.mlp3(x))
x = F.relu(self.mlp4(x))
x = self.lin1(torch.flatten(x)).reshape(1, -1)
values = torch.cat((values, x), axis=0)

return values
return values
