File: __init__.py

"""pyelpa -- python wrapper for ELPA

This wrapper uses cython to wrap the C API of ELPA (Eigenvalue SoLvers for
Petaflop-Applications) so that it can be called from python.

Examples:

1. Use the Elpa object to access the eigenvectors/eigenvalues wrapper:

>>> import numpy as np
... from pyelpa import ProcessorLayout, DistributedMatrix, Elpa
... from mpi4py import MPI
... 
... # set some parameters for matrix layout
... na = 1000
... nev = 200
... nblk = 16
... 
... # initialize processor layout, needed for calling ELPA
... comm = MPI.COMM_WORLD
... layout_p = ProcessorLayout(comm)
... 
... # create arrays
... a = DistributedMatrix(layout_p, na, nev, nblk)
... eigenvectors = DistributedMatrix(layout_p, na, nev, nblk)
... eigenvalues = np.zeros(na, dtype=np.float64)
... 
... # initialize elpa
... e = Elpa.from_distributed_matrix(a)
... 
... # set input matrix (a.data) on this core (a is stored in a block-cyclic
... # distributed layout; local size: a.na_rows x a.na_cols)
... # Caution: filled this way, the global matrix will not be symmetric; this
... # is just an example of how to access the local data
... a.data[:, :] = np.random.rand(a.na_rows, a.na_cols).astype(np.float64)
... 
... # now compute nev of the na eigenvectors and eigenvalues
... e.eigenvectors(a.data, eigenvalues, eigenvectors.data)
... 
... # now eigenvectors.data contains the local part of the eigenvector matrix
... # which is stored in a block-cyclic distributed layout
... 
... # now eigenvalues contains all computed eigenvalues on all cores
... 
... # now compute nev of the na eigenvalues only (no eigenvectors)
... e.eigenvalues(a.data, eigenvalues)
... 
... # now eigenvalues contains all computed eigenvalues on all cores
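
As a quick sanity check, here is a minimal sketch that builds on the session
above (it reuses comm, eigenvalues and nev from example 1 and relies only on
mpi4py and numpy, not on any additional pyelpa API) to verify that the
eigenvalue array is indeed replicated identically on every rank:

>>> # broadcast rank 0's eigenvalues and compare them to the local copy
... reference = comm.bcast(eigenvalues if comm.rank == 0 else None, root=0)
... assert np.allclose(eigenvalues[:nev], reference[:nev])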


2. Use the functions provided by the DistributedMatrix object:

>>> import numpy as np
... from pyelpa import DistributedMatrix
... 
... # set some parameters for matrix layout
... na = 1000
... nev = 200
... nblk = 16
... 
... a = DistributedMatrix.from_comm_world(na, nev, nblk)
... # use a diagonal matrix as input
... matrix = np.diagflat(np.arange(na)**2)
... # set from global matrix
... a.set_data_from_global_matrix(matrix)
... 
... data = a.compute_eigenvectors()
... eigenvalues = data['eigenvalues']
... eigenvectors = data['eigenvectors']
... # now eigenvectors.data contains the local part of the eigenvector matrix
... # which is stored in a block-cyclic distributed layout
... 
... # now eigenvalues contains all computed eigenvalues on all cores
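
Because the input in this example is the diagonal matrix diag(0, 1, 4, 9, ...),
its spectrum is known analytically. A minimal sketch of a consistency check
(assuming the nev computed eigenvalues are returned in ascending order, as is
usual for ELPA) could look like this:

>>> # the diagonal entries k**2 are already the eigenvalues in ascending order
... expected = np.arange(na, dtype=np.float64)**2
... assert np.allclose(eigenvalues[:nev], expected[:nev])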
"""
from .wrapper import Elpa
from .distributedmatrix import ProcessorLayout, DistributedMatrix

__all__ = ['ProcessorLayout', 'DistributedMatrix', 'Elpa']