cycles.py 6.23 KB
Newer Older
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
import matplotlib.pyplot as plt
import networkx as nx
import os
import pickle
import random
from torch.utils.data import Dataset


def get_previous(i, v_max):
    """Return the cyclic predecessor of node ``i`` on a ring whose last node is ``v_max``."""
    return v_max if i == 0 else i - 1


def get_next(i, v_max):
    """Return the cyclic successor of node ``i`` on a ring whose last node is ``v_max``."""
    return 0 if i == v_max else i + 1


def is_cycle(g):
    """Check whether graph ``g`` is exactly the ring 0-1-...-(n-1)-0.

    Every node must have precisely two successors: its cyclic predecessor
    and its cyclic successor. Rings need at least 3 nodes.
    """
    n = g.number_of_nodes()
    if n < 3:
        return False

    for v in range(n):
        nbrs = g.successors(v)
        if len(nbrs) != 2:
            return False
        # (v - 1) % n and (v + 1) % n are the cyclic predecessor/successor.
        if (v - 1) % n not in nbrs:
            return False
        if (v + 1) % n not in nbrs:
            return False

    return True


def get_decision_sequence(size):
    """
    Get the decision sequence for generating valid cycles with DGMG for teacher
    forcing optimization.
    """
    seq = []

    for node in range(size):
        seq.append(0)  # Add node

        if node > 0:
            # Add an edge whose destination is the previous node.
            seq.extend([0, node - 1])

        if node == size - 1:
            # Close the ring with an edge back to the root node.
            seq.extend([0, 0])

        seq.append(1)  # Stop adding edges

    seq.append(1)  # Stop adding nodes

    return seq


def generate_dataset(v_min, v_max, n_samples, fname):
    """Pickle ``n_samples`` decision sequences for random cycle sizes in
    [``v_min``, ``v_max``] to the file ``fname``.
    """
    sequences = [get_decision_sequence(random.randint(v_min, v_max))
                 for _ in range(n_samples)]

    with open(fname, 'wb') as f:
        pickle.dump(sequences, f)


class CycleDataset(Dataset):
    """A torch Dataset over pickled DGMG decision sequences.

    Each item is one decision sequence (a list of ints) that describes how
    to construct a cycle graph step by step.
    """

    def __init__(self, fname):
        super(CycleDataset, self).__init__()

        # Load all pre-generated decision sequences up front.
        with open(fname, 'rb') as f:
            raw = f.read()
        self.dataset = pickle.loads(raw)

    def __len__(self):
        """Number of decision sequences available."""
        return len(self.dataset)

    def __getitem__(self, index):
        """Return the decision sequence stored at ``index``."""
        return self.dataset[index]

    def collate_single(self, batch):
        """Unwrap a size-one batch into a single decision sequence."""
        assert len(batch) == 1, 'Currently we do not support batched training'
        return batch[0]

    def collate_batch(self, batch):
        """Identity collate: hand the batch through unchanged."""
        return batch

def dglGraph_to_adj_list(g):
    """Convert a DGLGraph into a ``{node: [neighbor, ...]}`` adjacency list.

    For an undirected graph, successors and predecessors are equivalent,
    so listing successors captures all neighbors.
    """
    return {node: g.successors(node).tolist()
            for node in range(g.number_of_nodes())}


class CycleModelEvaluation(object):
    """Evaluates a trained DGMG cycle model by sampling graphs from it and
    measuring how often they are valid cycles of an allowed size.

    Parameters
    ----------
    v_min : int
        Minimum number of nodes for a sampled graph to count as valid.
    v_max : int
        Maximum number of nodes for a sampled graph to count as valid.
    dir : str
        Output directory for sample plots and the evaluation summary.
        (The name shadows the builtin ``dir`` but is kept for backward
        compatibility with existing callers.)
    """

    def __init__(self, v_min, v_max, dir):
        super(CycleModelEvaluation, self).__init__()

        self.v_min = v_min
        self.v_max = v_max

        self.dir = dir

    def rollout_and_examine(self, model, num_samples):
        """Sample ``num_samples`` graphs from ``model``, plot them four at a
        time to ``<dir>/samples/`` and accumulate statistics on ``self``
        (``average_size``, ``valid_size_ratio``, ``cycle_ratio``,
        ``valid_ratio``).

        ``num_samples`` must be positive (the ratios divide by it).
        """
        assert not model.training, 'You need to call model.eval().'

        num_total_size = 0
        num_valid_size = 0
        num_cycle = 0
        num_valid = 0
        plot_times = 0
        adj_lists_to_plot = []

        for _ in range(num_samples):
            sampled_graph = model()

            if isinstance(sampled_graph, list):
                # When the model is a batched implementation, a list of
                # DGLGraph objects is returned. Note that with model(),
                # we generate a single graph as with the non-batched
                # implementation. We actually support batched generation
                # during the inference so feel free to modify the code.
                sampled_graph = sampled_graph[0]

            sampled_adj_list = dglGraph_to_adj_list(sampled_graph)
            adj_lists_to_plot.append(sampled_adj_list)

            graph_size = sampled_graph.number_of_nodes()
            valid_size = (self.v_min <= graph_size <= self.v_max)

            cycle = is_cycle(sampled_graph)

            num_total_size += graph_size

            if valid_size:
                num_valid_size += 1

            if cycle:
                num_cycle += 1

            if valid_size and cycle:
                num_valid += 1

            if len(adj_lists_to_plot) >= 4:
                plot_times += 1
                fig, ((ax0, ax1), (ax2, ax3)) = plt.subplots(2, 2)
                axes = {0: ax0, 1: ax1, 2: ax2, 3: ax3}
                # Use a fresh index name: the original `for i in range(4)`
                # shadowed the sample-loop variable.
                for j in range(4):
                    nx.draw_circular(nx.from_dict_of_lists(adj_lists_to_plot[j]),
                                     with_labels=True, ax=axes[j])

                # Build the path portably and make sure the samples
                # directory exists -- savefig would fail otherwise.
                samples_dir = os.path.join(self.dir, 'samples')
                os.makedirs(samples_dir, exist_ok=True)
                plt.savefig(os.path.join(samples_dir, '{:d}'.format(plot_times)))
                plt.close()

                adj_lists_to_plot = []

        self.num_samples_examined = num_samples
        self.average_size = num_total_size / num_samples
        self.valid_size_ratio = num_valid_size / num_samples
        self.cycle_ratio = num_cycle / num_samples
        self.valid_ratio = num_valid / num_samples

    def write_summary(self):
        """Write the statistics gathered by ``rollout_and_examine`` to
        ``<dir>/model_eval.txt`` as tab-separated key/value lines.
        Floats are formatted with 4 decimal places.
        """

        def _format_value(v):
            # Format floats with 4 decimals, ints plainly, anything else via str.
            if isinstance(v, float):
                return '{:.4f}'.format(v)
            elif isinstance(v, int):
                return '{:d}'.format(v)
            else:
                return '{}'.format(v)

        statistics = {
            'num_samples': self.num_samples_examined,
            'v_min': self.v_min,
            'v_max': self.v_max,
            'average_size': self.average_size,
            'valid_size_ratio': self.valid_size_ratio,
            'cycle_ratio': self.cycle_ratio,
            'valid_ratio': self.valid_ratio
        }

        model_eval_path = os.path.join(self.dir, 'model_eval.txt')

        with open(model_eval_path, 'w') as f:
            for key, value in statistics.items():
                msg = '{}\t{}\n'.format(key, _format_value(value))
                f.write(msg)

        print('Saved model evaluation statistics to {}'.format(model_eval_path))


class CyclePrinting(object):
    """Prints per-batch training progress for the cycle experiment.

    Parameters
    ----------
    num_epochs : int
        Total number of training epochs, used in the progress message.
    num_batches : int
        Number of batches per epoch; the internal batch counter wraps
        back to 1 after reaching this value.
    """

    def __init__(self, num_epochs, num_batches):
        super(CyclePrinting, self).__init__()

        self.num_epochs = num_epochs
        self.num_batches = num_batches
        self.batch_count = 0  # 1-based once update() is called; wraps at num_batches

    def update(self, epoch, metrics):
        """Advance the batch counter and print epoch/batch progress followed
        by each entry of ``metrics`` formatted with 4 decimal places.
        """
        self.batch_count = self.batch_count % self.num_batches + 1

        msg = 'epoch {:d}/{:d}, batch {:d}/{:d}'.format(epoch, self.num_epochs,
                                                        self.batch_count, self.num_batches)
        for key, value in metrics.items():
            # Fixed format typo: '{:4f}' (min width 4, default 6 decimals)
            # -> '{:.4f}' (4 decimal places), consistent with the
            # evaluation summary's float formatting.
            msg += ', {}: {:.4f}'.format(key, value)
        print(msg)