Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Binary file added Scripts/__pycache__/chemistry.cpython-38.pyc
Binary file not shown.
Binary file added Scripts/__pycache__/getENSDFdata.cpython-38.pyc
Binary file not shown.
Binary file added Scripts/__pycache__/parseENSDF.cpython-38.pyc
Binary file not shown.
2 changes: 1 addition & 1 deletion Scripts/chemistry.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,7 +27,7 @@ def getZ(ele):
return elements[ele.capitalize()]

def getElement(z):
for ele in elements.keys():
for ele in list(elements.keys()):
if elements[ele] == z:
return ele
return "None"
33 changes: 33 additions & 0 deletions Scripts/chemistry.py.bak
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
# Map from element symbol (capitalized, as returned by str.capitalize)
# to atomic number Z, covering H (1) through element 118.
# NOTE(review): elements 113/115/117/118 use the provisional IUPAC
# placeholders Uut/Uup/Uus/Uuo rather than the 2016 names Nh/Mc/Ts/Og —
# confirm before renaming, since getZ() lookups depend on these keys.
elements = {'H' : 1, 'He' : 2, 'Li' : 3, 'Be' : 4, 'B' : 5,
            'C' : 6, 'N' : 7, 'O' : 8, 'F' : 9, 'Ne' : 10,
            'Na' : 11,'Mg' : 12,'Al' : 13,'Si' : 14,'P' : 15,
            'S' : 16,'Cl' : 17,'Ar' : 18,'K' : 19,'Ca' : 20,
            'Sc' : 21,'Ti' : 22,'V' : 23,'Cr' : 24,'Mn' : 25,
            'Fe' : 26,'Co' : 27,'Ni' : 28,'Cu' : 29,'Zn' : 30,
            'Ga' : 31,'Ge' : 32,'As' : 33,'Se' : 34,'Br' : 35,
            'Kr' : 36,'Rb' : 37,'Sr' : 38,'Y' : 39,'Zr' : 40,
            'Nb' : 41,'Mo' : 42,'Tc' : 43,'Ru' : 44,'Rh' : 45,
            'Pd' : 46,'Ag' : 47,'Cd' : 48,'In' : 49,'Sn' : 50,
            'Sb' : 51,'Te' : 52,'I' : 53,'Xe' : 54,'Cs' : 55,
            'Ba' : 56,'La' : 57,'Ce' : 58,'Pr' : 59,'Nd' : 60,
            'Pm' : 61,'Sm' : 62,'Eu' : 63,'Gd' : 64,'Tb' : 65,
            'Dy' : 66,'Ho' : 67,'Er' : 68,'Tm' : 69,'Yb' : 70,
            'Lu' : 71,'Hf' : 72,'Ta' : 73,'W' : 74,'Re' : 75,
            'Os' : 76,'Ir' : 77,'Pt' : 78,'Au' : 79,'Hg' : 80,
            'Tl' : 81,'Pb' : 82,'Bi' : 83,'Po' : 84,'At' : 85,
            'Rn' : 86,'Fr' : 87,'Ra' : 88,'Ac' : 89,'Th' : 90,
            'Pa' : 91,'U' : 92,'Np' : 93,'Pu' : 94,'Am' : 95,
            'Cm' : 96,'Bk' : 97,'Cf' : 98,'Es' : 99,'Fm' :100,
            'Md' :101,'No' :102,'Lr' :103,'Rf' :104,'Db' :105,
            'Sg' :106,'Bh' :107,'Hs' :108,'Mt' :109,'Ds' :110,
            'Rg' :111,'Cn' :112,'Uut':113,'Fl' :114,'Uup':115,
            'Lv' :116,'Uus':117,'Uuo':118}

def getZ(ele):
    """Return the atomic number Z for element symbol *ele*.

    The symbol is normalized with str.capitalize(), so 'he', 'HE' and
    'He' all resolve to helium.  Raises KeyError for unknown symbols.
    """
    symbol = ele.capitalize()
    return elements[symbol]

def getElement(z):
    """Return the symbol of the element with atomic number *z*.

    Falls back to the string "None" when no element matches.
    """
    for symbol, number in elements.items():
        if number == z:
            return symbol
    return "None"
6 changes: 3 additions & 3 deletions Scripts/getAbundance.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,17 +6,17 @@ def findAbundance(iso):
dir = "./Data/"
fname = dir + "abundances.dat"
f = open(fname)
tokens = map(lambda line: re.split(" ", line), f.readlines())
tokens = [re.split(" ", line) for line in f.readlines()]

for words in tokens:
for word in words:
if word == iso:
return filter(None,words)[2]
return [_f for _f in words if _f][2]

def main(argv):
iso = sys.argv[1]
abundance = findAbundance(iso)
print abundance
print(abundance)
return abundance

if __name__ == '__main__' :
Expand Down
24 changes: 24 additions & 0 deletions Scripts/getAbundance.py.bak
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
#!/usr/bin/python
import sys
import re

def findAbundance(iso):
    """Look up the natural abundance of isotope *iso* (e.g. 'H2').

    Scans ./Data/abundances.dat for a space-separated token equal to
    *iso* and returns the third non-empty token of that line (the
    abundance, as a string; the line's trailing newline is preserved
    if the abundance is the last token).  Returns None when *iso*
    does not appear in the file.
    """
    data_dir = "./Data/"       # renamed: 'dir' shadowed the builtin
    fname = data_dir + "abundances.dat"
    # 'with' guarantees the file is closed even on the early return
    # (the original leaked the handle).
    with open(fname) as f:
        rows = [re.split(" ", line) for line in f.readlines()]

    for words in rows:
        for word in words:
            if word == iso:
                # Skip empty tokens produced by runs of spaces.
                return [w for w in words if w][2]

def main(argv):
    """CLI entry point: ``getAbundance.py <isotope>``.

    Prints and returns the abundance string for the requested isotope.
    """
    # Use the argv parameter; the original ignored it and read
    # sys.argv directly, which breaks programmatic callers.
    iso = argv[1]
    abundance = findAbundance(iso)
    print(abundance)
    return abundance

if __name__ == '__main__' :
main(sys.argv)

28 changes: 14 additions & 14 deletions Scripts/getENSDFdata.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
#!/usr/bin/python
import urllib2
import urllib.request, urllib.error, urllib.parse
import re
import sys
import chemistry
import socket
from urllib2 import Request
from sgmllib import SGMLParser
from urllib.request import Request
from sgmllib3k import SGMLParser

class URLLister(SGMLParser):
def reset(self):
Expand All @@ -25,10 +25,10 @@ def getPage(url, ele, A):
req = Request(url)
page = ''
try:
page = urllib2.urlopen(req,timeout=3)
page = urllib.request.urlopen(req,timeout=3)
except socket.timeout:
print "ERROR: TIMEOUT"
print url
print("ERROR: TIMEOUT")
print(url)

text = re.sub('<.*>','',page.read())
text = text[re.search('[0-9]',text).start():]
Expand All @@ -49,7 +49,7 @@ def getURL(ele, A):

# nndc_url = 'https://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc='+str(A)+ele.upper()+'&unc=nds'
nndc_url = 'https://www.nndc.bnl.gov/nudat2/decaysearchdirect.jsp?nuc='+str(A)+ele.upper()+'&unc=nds'
nndc_page = urllib2.urlopen(nndc_url,timeout=3)
nndc_page = urllib.request.urlopen(nndc_url,timeout=3)
parser = URLLister()
parser.feed(nndc_page.read())
parser.close()
Expand All @@ -64,14 +64,14 @@ def getURL(ele, A):
#url = 'https://www.nndc.bnl.gov/chart/' + url_end
url = 'https://www.nndc.bnl.gov/nudat2/' + url_end

print 'Retrieving ENSDF data from:\t',url
print('Retrieving ENSDF data from:\t',url)
req = Request(url)
page = ''
try:
page = urllib2.urlopen(req,timeout=3)
except (socket.timeout, urllib2.URLError):
print "ERROR: TIMEOUT"
print url
page = urllib.request.urlopen(req,timeout=3)
except (socket.timeout, urllib.error.URLError):
print("ERROR: TIMEOUT")
print(url)

text = re.sub('<.*>','',page.read())

Expand All @@ -89,7 +89,7 @@ def getURL(ele, A):
else :
continue
if len(text) < 30 :
print 'WARNING: Could not find alpha for ele = {}, A = {}'.format(ele,A)
print('WARNING: Could not find alpha for ele = {}, A = {}'.format(ele,A))
break

# Check that this page is for a ground state decay
Expand All @@ -104,7 +104,7 @@ def getURL(ele, A):

def main(argv):
if(len(argv) != 3):
print 'Usage: ./getENSDFdata.py [element] [A]'
print('Usage: ./getENSDFdata.py [element] [A]')
return

ele = argv[1]
Expand Down
116 changes: 116 additions & 0 deletions Scripts/getENSDFdata.py.bak
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
#!/usr/bin/python
import urllib2
import re
import sys
import chemistry
import socket
from urllib2 import Request
from sgmllib import SGMLParser

class URLLister(SGMLParser):
    """SGML parser that collects the href target of every <a> tag.

    After feed(), ``self.urls`` holds the hrefs in document order.
    """

    def reset(self):
        SGMLParser.reset(self)
        self.urls = []

    def start_a(self, attrs):
        # attrs is a list of (name, value) pairs for the <a> tag;
        # keep every href value we see.
        for key, value in attrs:
            if key == 'href':
                self.urls.append(value)


def getPage(url, ele, A):
    """Download the ENSDF decay page at *url*, strip its HTML, and save
    the cleaned text to ./Data/Decays/ensdf<Ele><A>.dat.

    Returns the cleaned text.  (The original returned a second
    page.read(), which is always '' because the stream was already
    consumed by the first read.)
    """
    out_dir = './Data/Decays/'     # renamed: 'dir' shadowed the builtin
    fname = out_dir + 'ensdf' + ele.capitalize() + str(A) + '.dat'
    req = Request(url)
    try:
        page = urllib2.urlopen(req, timeout=3)
    except socket.timeout:
        print('ERROR: TIMEOUT')
        print(url)
        # The original fell through with page == '' and crashed on
        # page.read() with AttributeError; propagate the real error.
        raise

    raw = page.read()              # read once; a second read() yields ''
    # Strip HTML tags, then drop everything before the first digit
    # (page header boilerplate).
    text = re.sub('<.*>', '', raw)
    text = text[re.search('[0-9]', text).start():]
    # Keep only lines with real content (more than one character).
    kept = [line + '\n' for line in text.split('\n') if len(line) > 1]
    # Remove a leading tab from each surviving line.
    text = re.sub(r'^\t', '', ''.join(kept), flags=re.MULTILINE)
    # 'with' guarantees the output file is closed (original leaked it).
    with open(fname, 'w') as f:
        f.write(text)
    return text

def getURL(ele, A):
    """Search NNDC NuDat2 for the alpha-decay dataset URL of isotope (ele, A).

    Scrapes the decay-search results page, keeps the 'getdecaydataset'
    links that mention an alpha decay, and returns the first candidate
    whose page looks like a ground-state alpha decay.

    NOTE(review): if no candidate link is collected, ``url`` is never
    assigned and the final ``return url`` raises NameError — confirm
    callers only pass isotopes with known alpha decays.
    """
    Z = chemistry.getZ(ele)
    # Alpha emission: daughter has A-4 nucleons and Z-2 protons.
    dau_A = A - 4
    dau_Z = Z-2
    dau_ele = chemistry.getElement(dau_Z)
    # NOTE(review): dau_A and dau_ele are computed but unused below.

    # nndc_url = 'https://www.nndc.bnl.gov/chart/decaysearchdirect.jsp?nuc='+str(A)+ele.upper()+'&unc=nds'
    nndc_url = 'https://www.nndc.bnl.gov/nudat2/decaysearchdirect.jsp?nuc='+str(A)+ele.upper()+'&unc=nds'
    nndc_page = urllib2.urlopen(nndc_url,timeout=3)
    parser = URLLister()
    parser.feed(nndc_page.read())
    parser.close()
    nndc_page.close()
    url_ends = []
    # Keep only dataset links that mention an alpha decay; spaces are
    # URL-encoded so the link can be fetched directly.
    for a_url in parser.urls:
        mod_url = re.sub(' ','%20',a_url)
        if re.search('getdecaydataset',mod_url) and re.search('a%20decay',mod_url):
            url_ends.append(mod_url)

    for url_end in url_ends:
        #url = 'https://www.nndc.bnl.gov/chart/' + url_end
        url = 'https://www.nndc.bnl.gov/nudat2/' + url_end

        print 'Retrieving ENSDF data from:\t',url
        req = Request(url)
        page = ''
        try:
            page = urllib2.urlopen(req,timeout=3)
        except (socket.timeout, urllib2.URLError):
            print "ERROR: TIMEOUT"
            print url
            # NOTE(review): on failure 'page' stays '' and page.read()
            # below raises AttributeError — confirm this is acceptable.

        # Strip HTML tags, leaving the plain-text dataset.
        text = re.sub('<.*>','',page.read())

        # Check that this page is for an alpha decay
        is_adecay = re.search(" A DECAY",text)
        if not is_adecay:
            continue
        adecay_pos = text.find("A DECAY")
        # Accept when the 'A DECAY' header sits near the top of the page.
        if adecay_pos > 0 and adecay_pos < 30 :
            break

        # Prune the page and check that it might have interesting content
        if re.search('[0-9]',text) :
            text = text[re.search('[0-9]',text).start():]
        else :
            continue
        if len(text) < 30 :
            print 'WARNING: Could not find alpha for ele = {}, A = {}'.format(ele,A)
            break

        # Check that this page is for a ground state decay
        # Presumably an ENSDF parent ('P') record carries the parent level
        # energy as its third whitespace field; '0.0' means ground state —
        # TODO confirm against the ENSDF record format.
        level = 0
        for line in text.split('\n'):
            if len(line) > 8 and line[6] == ' ' and line[7] == 'P':
                level = line.split()[2]
        if level == '0.0':
            break

    return url

def main(argv):
    """CLI entry point: ``getENSDFdata.py <element> <A>``.

    Locates and downloads the alpha-decay ENSDF dataset for the
    requested isotope.
    """
    if len(argv) != 3:
        # Parenthesized print works under both Python 2 and 3.
        print('Usage: ./getENSDFdata.py [element] [A]')
        return

    ele = argv[1]
    A = int(argv[2])
    url = getURL(ele, A)
    getPage(url, ele, A)

if __name__ == "__main__":
main(sys.argv)
4 changes: 2 additions & 2 deletions Scripts/getNaturalIsotopes.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ def findIsotopes(ele):
dir = "./Data/"
fname = dir + "abundances.dat"
f = open(fname)
tokens = map(lambda line: re.split(" ", line), f.readlines())
tokens = [re.split(" ", line) for line in f.readlines()]

isotopes = ""

Expand All @@ -19,7 +19,7 @@ def findIsotopes(ele):

def main(argv):
ele = argv[1]
print findIsotopes(ele)
print(findIsotopes(ele))

if __name__ == '__main__':
main(sys.argv)
25 changes: 25 additions & 0 deletions Scripts/getNaturalIsotopes.py.bak
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
#!/usr/bin/python
import sys
import re

def findIsotopes(ele):
    """Return the mass numbers of the naturally occurring isotopes of *ele*.

    Scans ./Data/abundances.dat for tokens whose letters equal the
    capitalized element symbol (e.g. 'He4' for 'he') and returns their
    digit parts as a single space-terminated string, e.g. '3 4 '.
    Returns '' when the element is not found.
    """
    data_dir = "./Data/"       # renamed: 'dir' shadowed the builtin
    fname = data_dir + "abundances.dat"
    # 'with' guarantees the file is closed (the original leaked it).
    with open(fname) as f:
        rows = [re.split(" ", line) for line in f.readlines()]

    isotopes = ""
    for words in rows:
        for word in words:
            # A token like 'He4' matches when its non-digit part equals
            # the element symbol; collect its digit part.
            if re.sub('[0-9]', '', word) == ele.capitalize():
                isotopes += re.sub('[A-Z a-z]', '', word) + " "

    return isotopes

def main(argv):
    """CLI entry point: ``getNaturalIsotopes.py <element>``.

    Prints the space-separated mass numbers of the element's natural
    isotopes.
    """
    ele = argv[1]
    # Parenthesized print works under both Python 2 and 3.
    print(findIsotopes(ele))

if __name__ == '__main__':
main(sys.argv)
Loading