Skip to content

Commit

Permalink
Merge branch 'master' of https://github.com/snap-stanford/ogb
Browse the repository at this point in the history
  • Loading branch information
weihua916 committed Apr 7, 2021
2 parents 53389b3 + 62f38c7 commit aef41a7
Show file tree
Hide file tree
Showing 8 changed files with 11 additions and 19 deletions.
2 changes: 1 addition & 1 deletion examples/graphproppred/code2/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -170,7 +170,7 @@ def decode_arr_to_seq(arr, idx2vocab):
'''


eos_idx_list = torch.nonzero(arr == len(idx2vocab) - 1) # find the position of __EOS__ (the last vocab in idx2vocab)
eos_idx_list = torch.nonzero(arr == len(idx2vocab) - 1, as_tuple=False) # find the position of __EOS__ (the last vocab in idx2vocab)
if len(eos_idx_list) > 0:
clippted_arr = arr[: torch.min(eos_idx_list)] # find the smallest __EOS__
else:
Expand Down
5 changes: 3 additions & 2 deletions examples/lsc/mag240m/README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# Baseline code for MAG-240M

Please refer to the **[OGB-LSC paper](https://arxiv.org/abs/2103.09430)** for the detailed setting.
- Please refer to the **[OGB-LSC paper](https://arxiv.org/abs/2103.09430)** for the detailed setting.
- Baseline code based on **[DGL](https://www.dgl.ai/)** is available **[here](https://github.com/dmlc/dgl/tree/master/examples/pytorch/ogb_lsc/MAG240M)**.

## Installation requirements
```
Expand Down Expand Up @@ -139,7 +140,7 @@ python rgnn.py --device=0 --model=rgat --evaluate
| Label Propagation | 58.44 | 56.29 | 0 | --- |
| SGC | 65.82 | 65.29 | 0.7M | GeForce RTX 2080 Ti (11GB GPU) |
| SIGN | 66.64 | 66.09 | 3.8M | GeForce RTX 2080 Ti (11GB GPU) |
| MLP+C&S | 66.98 | 66.09 | 0.5M | GeForce RTX 2080 Ti (11GB GPU) |
| MLP+C&S | 66.98 | 66.18 | 0.5M | GeForce RTX 2080 Ti (11GB GPU) |
| GraphSAGE | 67.32 | 66.25 | 4.9M | GeForce RTX 2080 Ti (11GB GPU) |
| GAT | 67.71 | 66.63 | 4.9M | GeForce RTX 2080 Ti (11GB GPU) |
| R-GraphSAGE | 70.21 | 68.94 | 12.2M | GeForce RTX 2080 Ti (11GB GPU) |
Expand Down
3 changes: 2 additions & 1 deletion examples/lsc/pcqm4m/README.md
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
# Baseline code for PCQM4M-LSC

Please refer to the **[OGB-LSC paper](https://arxiv.org/abs/2103.09430)** for the detailed setting.
- Please refer to the **[OGB-LSC paper](https://arxiv.org/abs/2103.09430)** for the detailed setting.
- Baseline code based on **[DGL](https://www.dgl.ai/)** is available **[here](https://github.com/dmlc/dgl/tree/master/examples/pytorch/ogb_lsc/PCQM4M)**.

## Installation requirements
```
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -222,17 +222,7 @@ def rmap_fname(self):
return 'relations.dict'

class KGDatasetWiki(KGDataset):
'''Load a knowledge graph FB15k
The FB15k dataset has five files:
* entities.dict stores the mapping between entity Id and entity name.
* relations.dict stores the mapping between relation Id and relation name.
* train.txt stores the triples in the training set.
* valid.txt stores the triples in the validation set.
* test.txt stores the triples in the test set.
The mapping between entity (relation) name and entity (relation) Id is stored as 'name\tid'.
The triples are stored as 'head_nid\trelation_id\ttail_nid'.
'''Load a knowledge graph wikikg
'''
def __init__(self, path, name='wikikg90m'):
self.name = name
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -339,7 +339,7 @@ def __init__(self, dataset, args, ranks=64, has_importance=False):
else:
self.edge_parts = [np.arange(num_train)]
self.rel_parts = [np.arange(dataset.n_relations)]
self.cross_part = False
self.cross_part = True

self.g = ConstructGraph(triples, dataset.n_entities, args)

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -198,7 +198,7 @@ def test(args, model, test_samplers, step, rank=0, mode='Test'):
for sampler in test_samplers:
print(sampler.num_edges, sampler.batch_size)
for query, ans, candidate in tqdm(sampler, disable=not args.print_on_screen, total=ceil(sampler.num_edges/sampler.batch_size)):
log = model.forward_test_wikikg(query, ans, candidate, sampler.mode, gpu_id)
log = model.forward_test_wikikg(query, ans, candidate, sampler.mode, gpu_id).cpu()
logs[sampler.mode].append(log)
answers[sampler.mode].append(ans)
print("[{}] finished {} forward".format(rank, mode))
Expand Down
2 changes: 1 addition & 1 deletion ogb/linkproppred/evaluate.py
Original file line number Diff line number Diff line change
Expand Up @@ -232,7 +232,7 @@ def _eval_mrr(self, y_pred_pos, y_pred_neg, type_info):
if type_info == 'torch':
y_pred = torch.cat([y_pred_pos.view(-1,1), y_pred_neg], dim = 1)
argsort = torch.argsort(y_pred, dim = 1, descending = True)
ranking_list = torch.nonzero(argsort == 0)
ranking_list = torch.nonzero(argsort == 0, as_tuple=False)
ranking_list = ranking_list[:, 1] + 1
hits1_list = (ranking_list <= 1).to(torch.float)
hits3_list = (ranking_list <= 3).to(torch.float)
Expand Down
2 changes: 1 addition & 1 deletion ogb/lsc/wikikg90m.py
Original file line number Diff line number Diff line change
Expand Up @@ -206,7 +206,7 @@ def _calculate_mrr(self, correct_index, pred_top10):
- pred_top10: shape (num_eval_triplets, 10)
'''
# extract indices where correct_index is within top10
tmp = torch.nonzero(correct_index.view(-1,1) == pred_top10)
tmp = torch.nonzero(correct_index.view(-1,1) == pred_top10, as_tuple=False)

# reciprocal rank
# if rank is larger than 10, then set the reciprocal rank to 0.
Expand Down

0 comments on commit aef41a7

Please sign in to comment.