@SnoopJeDi/

ExcitableMaroonAutoexec

Python

No description

fork
loading
Files
  • main.py
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
import numpy as np

# Inline test fixture: a two-frame GROMACS .gro-style trajectory snippet.
# Each frame is: a "Generated by trjconv ..." title line, an atom-count line,
# one fixed-width row per atom (residue+name, atom type, index, x y z, vx vy vz),
# and a trailing box-vector line.  The "###ignore###" markers flag the
# non-atom lines that parse_step skips via skip_header/skip_footer.
SAMPLEDATA = """Generated by trjconv : graphene in water t=   0.00000 ###ignore###
 4014
 1729SOL     OW    1   2.991   2.196   1.749 -0.0000  0.0000 -0.0000
 3065SOL    HW1 4010   0.057   1.693   1.608  0.0000  0.0000  0.0000
 3065SOL    HW2 4011   0.076   1.658   1.766 -0.0000  0.0000 -0.0000
 3066SOL     OW 4012   2.040   1.183   1.129 -0.0000 -0.0000  0.0000
 3066SOL    HW1 4013   1.970   1.154   1.063  0.0000  0.0000  0.0000
 3066SOL    HW2 4014   2.125   1.202   1.080  0.0001  0.0000 -0.0001
   6.95000   6.63200   2.84000  ###ignore###
Generated by trjconv : graphene in water t=   0.50000  ###ignore###
 4014   ###ignore###
 1729SOL     OW    1   3.082   2.163   1.664  0.3748  0.3087 -0.4980
 1729SOL    HW1    2   3.123   2.079   1.699 -1.0283 -0.0863  0.2113
 1729SOL    HW2    3   3.145   2.239   1.679  1.3255 -0.4782 -0.4386
 3065SOL    HW2 4011   0.097   1.603   1.739  3.0982  2.0249 -0.5412
 3066SOL     OW 4012   2.134   1.182   1.096 -0.2602 -0.4156 -0.4006
 3066SOL    HW1 4013   2.203   1.249   1.067 -0.1512 -1.6699 -3.1684
 3066SOL    HW2 4014   2.056   1.230   1.136  0.8463  1.1988 -0.1258
   6.95000   6.63200   2.84000 ###ignore###"""

# old solution:
# qframes = [(np.genfromtxt(file_name,
#                           skip_header=2+line_count*frame_index+3*frame_index,
#                           max_rows = line_count,
#                           converters={1:lambda s: (-2  if (str(s, "UTF-8").startswith("O")) else 1)},
#                           usecols = np.r_[1,4,6]), frame_index) for frame_index in range(frame_count)]

def chunk(txt, prefix):
    """
    Partition an iterable of lines `txt` into lists separated by lines
    beginning with `prefix`.

    The match is case-insensitive (each line is lowercased before the
    `startswith` test), so `prefix` should be given in lowercase.  A
    separator line starts a new chunk and is included as that chunk's
    first element.  Lines before the first separator form their own
    leading chunk.

    Yields:
        list[str]: consecutive groups of lines, in input order.
    """
    buf = []
    for line in txt:
        if line.lower().startswith(prefix) and buf:
            yield buf
            buf = [line]
        else:
            buf.append(line)
    # Bug fix: the original yielded `buf` unconditionally, so an empty
    # input produced one spurious empty chunk ([[]]) that would crash
    # downstream parsing.  Only emit the trailing chunk if it has lines.
    if buf:
        yield buf

# Value assigned per atom-type name in parse_step's column converter:
# 'OW' (water oxygen) maps to -2.0, anything else falls back to 'default'.
lookup = dict(OW=-2.0, default=1.0)

def parse_step(frametxt):
    """
    Turn one frame (an iterable of text lines) into an ndarray.

    Drops the two header lines (title + atom count) and the trailing
    box line, keeps columns 1, 4 and 6, and replaces the atom-name
    column with a numeric value looked up in the module-level `lookup`
    table (falling back to lookup['default'] for unknown names).
    """
    def atom_value(name):
        # Atom-name string -> number via `lookup`; unknown names get the default.
        return lookup.get(name, lookup['default'])

    return np.genfromtxt(
        frametxt,
        usecols=(1, 4, 6),
        skip_header=2,
        skip_footer=1,
        converters={1: atom_value},
    )

# Split the raw text into per-frame line groups, then parse each frame
# into an ndarray of (atom value, y, vx) rows.
steptxt = chunk(SAMPLEDATA.split('\n'), prefix='generated')
stepdata = [parse_step(frame) for frame in steptxt]