atoms_dataset.py
import os
import random
import warnings
from collections import Counter
from typing import Any, Callable, Dict, List, Optional, Union

import numpy as np
import torch.utils.data
from ase.atoms import Atoms
from ase.data import chemical_symbols
from ase.io import write
from tqdm import tqdm

import sevenn._keys as KEY
import sevenn.train.dataload as dataload
import sevenn.util as util
from sevenn._const import NUM_UNIV_ELEMENT
from sevenn.atom_graph_data import AtomGraphData

_warn_avg_num_neigh = """SevenNetAtomsDataset does not provide an exact avg_num_neigh,
as it does not pre-build graphs. Only up to 10000 randomly sampled structures are
used to approximate this value. If you need a more precise avg_num_neigh, use
SevenNetGraphDataset. If that is not viable due to memory limits, an online
algorithm would be required, which is not yet implemented in SevenNet."""
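

# A minimal sketch of the 'online algorithm' mentioned above: a streaming mean
# over per-structure neighbor-count arrays that never concatenates them in
# memory (illustrative helper, not used elsewhere in this module):
def _online_mean_sketch(arrays):
    total, count = 0.0, 0
    for arr in arrays:
        total += float(np.sum(arr))
        count += int(len(arr))
    return total / max(count, 1)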


class SevenNetAtomsDataset(torch.utils.data.Dataset):
    """
    Args:
        cutoff: edge cutoff of given AtomGraphData
        files: list of filenames or dict describing how to parse the file
               ASE readable (with proper extension), structure_list, .sevenn_data,
               dict containing file_list (see dict_reader of train/dataload.py)
        info_dict_copy_keys: patch these keys from KEY.INFO to graph when accessing.
            default is KEY.DATA_WEIGHT and KEY.DATA_MODALITY, which may accessed
            while training.
        **process_kwargs: keyword arguments that will be passed into ase.io.read
    """

    def __init__(
        self,
        cutoff: float,
        files: Union[str, List[str]],
        atoms_filter: Optional[Callable] = None,
        atoms_transform: Optional[Callable] = None,
        transform: Optional[Callable] = None,
        use_data_weight: bool = False,
        **process_kwargs,
    ):
        self.cutoff = cutoff
        if isinstance(files, str):
            files = [files]  # user convenience
        files = [os.path.abspath(file) for file in files]
        self._files = files
        self.atoms_filter = atoms_filter
        self.atoms_transform = atoms_transform
        self.transform = transform
        self.use_data_weight = use_data_weight
        self._scanned = False
        self._avg_num_neigh_approx = None
        self.statistics = {}

        atoms_list = []
        for file in files:
            atoms_list.extend(
                SevenNetAtomsDataset.file_to_atoms_list(file, **process_kwargs)
            )
        self._atoms_list = atoms_list

        super().__init__()

    @staticmethod
    def file_to_atoms_list(file: Union[str, dict], **kwargs) -> List[Atoms]:
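        """Read one entry of ``files`` into a list of ase.Atoms.

        A dict is handed to dataload.dict_reader, a path containing
        'structure_list' to dataload.structure_list_reader, and anything
        else to dataload.ase_reader (kwargs are forwarded to ase.io.read).
        """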
        if isinstance(file, dict):
            atoms_list = dataload.dict_reader(file)
        elif 'structure_list' in file:
            atoms_dct = dataload.structure_list_reader(file)
            atoms_list = []
            for lst in atoms_dct.values():
                atoms_list.extend(lst)
        else:
            atoms_list = dataload.ase_reader(file, **kwargs)
        return atoms_list

    def save(self, path):
        # Save atoms list as extxyz
        write(path, self._atoms_list, format='extxyz')

    def _graph_build(self, atoms):
        return dataload.atoms_to_graph(
            atoms, self.cutoff, transfer_info=False, y_from_calc=False
        )

    def __len__(self):
        return len(self._atoms_list)

    def __getitem__(self, index):
        atoms = self._atoms_list[index]
        if self.atoms_transform is not None:
            atoms = self.atoms_transform(atoms)

        graph = self._graph_build(atoms)
        if self.transform is not None:
            graph = self.transform(graph)

        if self.use_data_weight:
            weight = graph[KEY.INFO].pop(
                KEY.DATA_WEIGHT, {'energy': 1.0, 'force': 1.0, 'stress': 1.0}
            )
            graph[KEY.DATA_WEIGHT] = weight

        return AtomGraphData.from_numpy_dict(graph)

    @property
    def species(self):
        self.run_stat()
        return [z for z in self.statistics['_natoms'].keys() if z != 'total']

    @property
    def natoms(self):
        self.run_stat()
        return self.statistics['_natoms']

    @property
    def per_atom_energy_mean(self):
        self.run_stat()
        return self.statistics[KEY.PER_ATOM_ENERGY]['mean']

    @property
    def elemwise_reference_energies(self):
        from sklearn.linear_model import Ridge

        self.run_stat()  # ensures '_composition' and the energy array exist
        c = self.statistics['_composition']
        y = self.statistics[KEY.ENERGY]['_array']
        zero_indices = np.all(c == 0, axis=0)
        c_reduced = c[:, ~zero_indices]
        # Ridge solves min_w ||c_reduced @ w - y||^2 + alpha * ||w||^2, so
        # w[j] approximates the per-atom reference energy of the j-th element.
        # This will not reproduce train/dataset.py exactly, which sorted
        # species alphabetically; here elements are ordered by atomic number.
        coef_reduced = Ridge(alpha=0.1, fit_intercept=False).fit(c_reduced, y).coef_
        full_coeff = np.zeros(NUM_UNIV_ELEMENT)
        full_coeff[~zero_indices] = coef_reduced
        return full_coeff.tolist()  # e.g. full_coeff[1] = H reference energy

    @property
    def force_rms(self):
        self.run_stat()
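        # RMS from the stored moments: E[F^2] = mean(F)^2 + var(F)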
        mean = self.statistics[KEY.FORCE]['mean']
        std = self.statistics[KEY.FORCE]['std']
        return float((mean**2 + std**2) ** (0.5))

    @property
    def per_atom_energy_std(self):
        self.run_stat()
        return self.statistics[KEY.PER_ATOM_ENERGY]['std']

    @property
    def avg_num_neigh(self):
        # A property cannot take arguments, so the sample size is fixed here.
        n_sample = 10000
        if self._avg_num_neigh_approx is None:
            if len(self) > n_sample:
                warnings.warn(_warn_avg_num_neigh)
            n_sample = min(len(self), n_sample)
            indices = random.sample(range(len(self)), n_sample)
            n_neigh = []
            for i in indices:
                graph = self[i]
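                # counts per source atom; atoms with zero neighbors within
                # the cutoff never appear here and are excluded from the mean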
                _, nn = np.unique(graph[KEY.EDGE_IDX][0], return_counts=True)
                n_neigh.append(nn)
            n_neigh = np.concatenate(n_neigh)
            self._avg_num_neigh_approx = np.mean(n_neigh)
        return self._avg_num_neigh_approx

    @property
    def sqrt_avg_num_neigh(self):
        self.run_stat()
        return self.avg_num_neigh**0.5

    def run_stat(self):
        """
        Loop over the dataset and initialize any statistics that may be needed.
        Unlike SevenNetGraphDataset, the neighbor count is not computed here,
        as it would require building every graph.
        """
        if self._scanned:
            return  # statistics already computed
        y_keys: List[str] = [KEY.ENERGY, KEY.PER_ATOM_ENERGY, KEY.FORCE, KEY.STRESS]
        natoms_counter = Counter()
        composition = np.zeros((len(self), NUM_UNIV_ELEMENT))
        stats: Dict[str, Dict[str, Any]] = {y: {'_array': []} for y in y_keys}

        for i, atoms in tqdm(
            enumerate(self._atoms_list), desc='run_stat', total=len(self)
        ):
            z = atoms.get_atomic_numbers()
            natoms_counter.update(z.tolist())
            composition[i] = np.bincount(z, minlength=NUM_UNIV_ELEMENT)
            for y, dct in stats.items():
                if y == KEY.ENERGY:
                    dct['_array'].append(atoms.info['y_energy'])
                elif y == KEY.PER_ATOM_ENERGY:
                    dct['_array'].append(atoms.info['y_energy'] / len(atoms))
                elif y == KEY.FORCE:
                    dct['_array'].append(atoms.arrays['y_force'].reshape(-1))
                elif y == KEY.STRESS:
                    dct['_array'].append(atoms.info['y_stress'].reshape(-1))

        for y, dct in stats.items():
            if y == KEY.FORCE:
                array = np.concatenate(dct['_array'])
            else:
                array = np.array(dct['_array']).reshape(-1)
            dct.update(
                {
                    'mean': float(np.mean(array)),
                    'std': float(np.std(array)),
                    'median': float(np.quantile(array, q=0.5)),
                    'max': float(np.max(array)),
                    'min': float(np.min(array)),
                    '_array': array,
                }
            )

        natoms = {chemical_symbols[int(z)]: cnt for z, cnt in natoms_counter.items()}
        natoms['total'] = sum(natoms.values())
        self.statistics.update(
            {
                '_composition': composition,
                '_natoms': natoms,
                **stats,
            }
        )
        self._scanned = True
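

# Usage sketch (illustrative; 'data.extxyz' is a hypothetical path to a file
# whose structures carry y_energy / y_force / y_stress labels):
#   dataset = SevenNetAtomsDataset(cutoff=5.0, files='data.extxyz')
#   graph = dataset[0]          # AtomGraphData, built on the fly
#   dataset.run_stat()          # one pass over the labels fills .statistics
#   print(dataset.per_atom_energy_mean, dataset.force_rms)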


# Used by the training script; returns a dict of SevenNetAtomsDataset
def from_config(
    config: Dict[str, Any],
    working_dir: str = os.getcwd(),
    dataset_keys: Optional[List[str]] = None,
):
    from sevenn.logger import Logger

    log = Logger()
    if dataset_keys is None:
        dataset_keys = []
        for k in config:
            if k.startswith('load_') and k.endswith('_path'):
                dataset_keys.append(k)

    if KEY.LOAD_TRAINSET not in dataset_keys:
        raise ValueError(f'{KEY.LOAD_TRAINSET} must be present in config')

    # initialize arguments for loading dataset
    dataset_args = {
        'cutoff': config[KEY.CUTOFF],
        'use_data_weight': config.get(KEY.USE_WEIGHT, False),
        **config[KEY.DATA_FORMAT_ARGS],
    }

    datasets = {}
    for dk in dataset_keys:
        if not (paths := config[dk]):
            continue
        if isinstance(paths, str):
            paths = [paths]
        name = '_'.join([nn.strip() for nn in dk.split('_')[1:-1]])
        dataset_args.update({'files': paths})
        datasets[name] = SevenNetAtomsDataset(**dataset_args)

    if not config[KEY.COMPUTE_STATISTICS]:
        log.writeline(
            'Computing statistics is skipped. Note that if any other '
            'configuration requires statistics (shift, scale, avg_num_neigh, '
            'chemical_species as auto), SevenNet will eventually raise an error!'
        )
        return datasets

    train_set = datasets['trainset']

    chem_species = set(train_set.species)
    # print statistics of each dataset
    for name, dataset in datasets.items():
        dataset.run_stat()
        log.bar()
        log.writeline(f'{name} distribution:')
        log.statistic_write(dataset.statistics)
        log.format_k_v('# atoms (node)', dataset.natoms, write=True)
        log.format_k_v('# structures (graph)', len(dataset), write=True)

        chem_species.update(dataset.species)
    log.bar()

    # initialize known species from the dataset if 'auto'
    # sorted into alphabetical order (same behavior as before)
    chem_keys = [KEY.CHEMICAL_SPECIES, KEY.NUM_SPECIES, KEY.TYPE_MAP]
    if all(config[ck] == 'auto' for ck in chem_keys):  # see parse_input.py
        log.writeline('Known species are obtained from the dataset')
        config.update(util.chemical_species_preprocess(sorted(chem_species)))

    # retrieve shift, scale, conv_denominator from user input (keyword)
    init_from_stats = [KEY.SHIFT, KEY.SCALE, KEY.CONV_DENOMINATOR]
    for k in init_from_stats:
        inp = config[k]  # a statistic name (str) or explicit numbers
        # If it is not a str: 1. this is 'continue' training, or
        #                     2. the user manually inserted numbers
        if isinstance(inp, str) and hasattr(train_set, inp):
            var = getattr(train_set, inp)
            config.update({k: var})
            log.writeline(f'{k} is obtained from statistics')
        elif isinstance(inp, str) and not hasattr(train_set, inp):
            raise NotImplementedError(inp)
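    # e.g. shift: 'elemwise_reference_energies' or scale: 'force_rms' in the
    # input resolve to the dataset properties of the same names above
    # (illustrative values; any property name of the dataset works this way).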

    return datasets
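

# Usage sketch (illustrative; assumes a fully parsed SevenNet config dict):
#   datasets = from_config(config)
#   train_set = datasets['trainset']  # name derived from KEY.LOAD_TRAINSET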