From 6d2cd8ca469daf1154709bd4a8c292ae15b97145 Mon Sep 17 00:00:00 2001
From: "Harish G. Naik"
Date: Thu, 21 Oct 2021 22:59:20 -0500
Subject: [PATCH 1/3] Removed Pytorch & Sklearn Warnings

---
 train.py | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/train.py b/train.py
index 7c12912..41cd95e 100644
--- a/train.py
+++ b/train.py
@@ -89,9 +89,9 @@ def prepare_data(graphs, args, test_graphs=None, max_nodes=0):
     )
 
     dataset_sampler = graph_utils.GraphSampler(
-        val_graphs, 
-        normalize=False, 
-        max_num_nodes=max_nodes, 
+        val_graphs,
+        normalize=False,
+        max_num_nodes=max_nodes,
         features=args.feature_type
     )
     val_dataset_loader = torch.utils.data.DataLoader(
@@ -126,7 +126,7 @@ def prepare_data(graphs, args, test_graphs=None, max_nodes=0):
 
 #############################
 #
-# Training 
+# Training
 #
 #############################
 def train(
@@ -194,7 +194,7 @@ def train(
             else:
                 loss = model.loss(ypred, label, adj, batch_num_nodes)
             loss.backward()
-            nn.utils.clip_grad_norm(model.parameters(), args.clip)
+            nn.utils.clip_grad_norm_(model.parameters(), args.clip)
             optimizer.step()
             iter += 1
             avg_loss += loss
@@ -294,7 +294,7 @@ def train_node_classifier(G, labels, model, args, writer=None):
        else:
            loss = model.loss(ypred_train, labels_train)
        loss.backward()
-        nn.utils.clip_grad_norm(model.parameters(), args.clip)
+        nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
 
        #for param_group in optimizer.param_groups:
@@ -424,7 +424,7 @@ def train_node_classifier_multigraph(G_list, labels, model, args, writer=None):
        else:
            loss = model.loss(ypred_train_cmp, labels_train)
        loss.backward()
-        nn.utils.clip_grad_norm(model.parameters(), args.clip)
+        nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
 
        #for param_group in optimizer.param_groups:
@@ -516,7 +516,7 @@ def evaluate(dataset, model, args, name="Validation", max_num_examples=None):
     preds = np.hstack(preds)
 
     result = {
-        "prec": metrics.precision_score(labels, preds, average="macro"),
+        "prec": metrics.precision_score(labels, preds, average="macro", zero_division=0),
         "recall": metrics.recall_score(labels, preds, average="macro"),
         "acc": metrics.accuracy_score(labels, preds),
     }
@@ -534,13 +534,13 @@ def evaluate_node(ypred, labels, train_idx, test_idx):
     labels_test = np.ravel(labels[:, test_idx])
     result_train = {
-        "prec": metrics.precision_score(labels_train, pred_train, average="macro"),
+        "prec": metrics.precision_score(labels_train, pred_train, average="macro", zero_division=0),
         "recall": metrics.recall_score(labels_train, pred_train, average="macro"),
         "acc": metrics.accuracy_score(labels_train, pred_train),
         "conf_mat": metrics.confusion_matrix(labels_train, pred_train),
     }
     result_test = {
-        "prec": metrics.precision_score(labels_test, pred_test, average="macro"),
+        "prec": metrics.precision_score(labels_test, pred_test, average="macro", zero_division=0),
         "recall": metrics.recall_score(labels_test, pred_test, average="macro"),
         "acc": metrics.accuracy_score(labels_test, pred_test),
         "conf_mat": metrics.confusion_matrix(labels_test, pred_test),
@@ -1177,4 +1177,3 @@ def main():
 
 if __name__ == "__main__":
     main()
-

From df6cf022e2b889b7c024cd267152d50628b8334f Mon Sep 17 00:00:00 2001
From: "Harish G. Naik"
Date: Thu, 21 Oct 2021 23:02:21 -0500
Subject: [PATCH 2/3] Revert "Removed Pytorch & Sklearn Warnings"

This reverts commit 6d2cd8ca469daf1154709bd4a8c292ae15b97145.
---
 train.py | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/train.py b/train.py
index 41cd95e..7c12912 100644
--- a/train.py
+++ b/train.py
@@ -89,9 +89,9 @@ def prepare_data(graphs, args, test_graphs=None, max_nodes=0):
     )
 
     dataset_sampler = graph_utils.GraphSampler(
-        val_graphs,
-        normalize=False,
-        max_num_nodes=max_nodes,
+        val_graphs, 
+        normalize=False, 
+        max_num_nodes=max_nodes, 
         features=args.feature_type
     )
     val_dataset_loader = torch.utils.data.DataLoader(
@@ -126,7 +126,7 @@ def prepare_data(graphs, args, test_graphs=None, max_nodes=0):
 
 #############################
 #
-# Training
+# Training 
 #
 #############################
 def train(
@@ -194,7 +194,7 @@ def train(
             else:
                 loss = model.loss(ypred, label, adj, batch_num_nodes)
             loss.backward()
-            nn.utils.clip_grad_norm_(model.parameters(), args.clip)
+            nn.utils.clip_grad_norm(model.parameters(), args.clip)
             optimizer.step()
             iter += 1
             avg_loss += loss
@@ -294,7 +294,7 @@ def train_node_classifier(G, labels, model, args, writer=None):
        else:
            loss = model.loss(ypred_train, labels_train)
        loss.backward()
-        nn.utils.clip_grad_norm_(model.parameters(), args.clip)
+        nn.utils.clip_grad_norm(model.parameters(), args.clip)
        optimizer.step()
 
        #for param_group in optimizer.param_groups:
@@ -424,7 +424,7 @@ def train_node_classifier_multigraph(G_list, labels, model, args, writer=None):
        else:
            loss = model.loss(ypred_train_cmp, labels_train)
        loss.backward()
-        nn.utils.clip_grad_norm_(model.parameters(), args.clip)
+        nn.utils.clip_grad_norm(model.parameters(), args.clip)
        optimizer.step()
 
        #for param_group in optimizer.param_groups:
@@ -516,7 +516,7 @@ def evaluate(dataset, model, args, name="Validation", max_num_examples=None):
     preds = np.hstack(preds)
 
     result = {
-        "prec": metrics.precision_score(labels, preds, average="macro", zero_division=0),
+        "prec": metrics.precision_score(labels, preds, average="macro"),
         "recall": metrics.recall_score(labels, preds, average="macro"),
         "acc": metrics.accuracy_score(labels, preds),
     }
@@ -534,13 +534,13 @@ def evaluate_node(ypred, labels, train_idx, test_idx):
     labels_test = np.ravel(labels[:, test_idx])
     result_train = {
-        "prec": metrics.precision_score(labels_train, pred_train, average="macro", zero_division=0),
+        "prec": metrics.precision_score(labels_train, pred_train, average="macro"),
         "recall": metrics.recall_score(labels_train, pred_train, average="macro"),
         "acc": metrics.accuracy_score(labels_train, pred_train),
         "conf_mat": metrics.confusion_matrix(labels_train, pred_train),
     }
     result_test = {
-        "prec": metrics.precision_score(labels_test, pred_test, average="macro", zero_division=0),
+        "prec": metrics.precision_score(labels_test, pred_test, average="macro"),
         "recall": metrics.recall_score(labels_test, pred_test, average="macro"),
         "acc": metrics.accuracy_score(labels_test, pred_test),
         "conf_mat": metrics.confusion_matrix(labels_test, pred_test),
@@ -1177,3 +1177,4 @@ def main():
 
 if __name__ == "__main__":
     main()
+

From a6ffd9b559010b70414582243e8bfa6da76c7389 Mon Sep 17 00:00:00 2001
From: "Harish G. Naik"
Date: Thu, 21 Oct 2021 23:04:51 -0500
Subject: [PATCH 3/3] Cleanup

---
 train.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/train.py b/train.py
index 7c12912..8ae9a91 100644
--- a/train.py
+++ b/train.py
@@ -194,7 +194,7 @@ def train(
             else:
                 loss = model.loss(ypred, label, adj, batch_num_nodes)
             loss.backward()
-            nn.utils.clip_grad_norm(model.parameters(), args.clip)
+            nn.utils.clip_grad_norm_(model.parameters(), args.clip)
             optimizer.step()
             iter += 1
             avg_loss += loss
@@ -294,7 +294,7 @@ def train_node_classifier(G, labels, model, args, writer=None):
        else:
            loss = model.loss(ypred_train, labels_train)
        loss.backward()
-        nn.utils.clip_grad_norm(model.parameters(), args.clip)
+        nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
 
        #for param_group in optimizer.param_groups:
@@ -424,7 +424,7 @@ def train_node_classifier_multigraph(G_list, labels, model, args, writer=None):
        else:
            loss = model.loss(ypred_train_cmp, labels_train)
        loss.backward()
-        nn.utils.clip_grad_norm(model.parameters(), args.clip)
+        nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        optimizer.step()
 
        #for param_group in optimizer.param_groups:
@@ -516,7 +516,7 @@ def evaluate(dataset, model, args, name="Validation", max_num_examples=None):
     preds = np.hstack(preds)
 
     result = {
-        "prec": metrics.precision_score(labels, preds, average="macro"),
+        "prec": metrics.precision_score(labels, preds, average="macro", zero_division=0),
         "recall": metrics.recall_score(labels, preds, average="macro"),
         "acc": metrics.accuracy_score(labels, preds),
     }
@@ -534,13 +534,13 @@ def evaluate_node(ypred, labels, train_idx, test_idx):
     labels_test = np.ravel(labels[:, test_idx])
     result_train = {
-        "prec": metrics.precision_score(labels_train, pred_train, average="macro"),
+        "prec": metrics.precision_score(labels_train, pred_train, average="macro", zero_division=0),
         "recall": metrics.recall_score(labels_train, pred_train, average="macro"),
         "acc": metrics.accuracy_score(labels_train, pred_train),
         "conf_mat": metrics.confusion_matrix(labels_train, pred_train),
     }
     result_test = {
-        "prec": metrics.precision_score(labels_test, pred_test, average="macro"),
+        "prec": metrics.precision_score(labels_test, pred_test, average="macro", zero_division=0),
         "recall": metrics.recall_score(labels_test, pred_test, average="macro"),
         "acc": metrics.accuracy_score(labels_test, pred_test),
         "conf_mat": metrics.confusion_matrix(labels_test, pred_test),