Skip to content

Main #2

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
60 changes: 55 additions & 5 deletions Start/Ch_1/challenge_start.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,15 +2,65 @@
# Programming challenge: summarize the earthquake data

import json
import pprint


# for this challenge, we're going to summarize the earthquake data as follows:
# 1: How many quakes are there in total?
# 2: How many quakes were felt by at least 100 people?
# 3: Print the name of the place whose quake was felt by the most people, with the # of reports
# 4: Print the top 10 most significant events, with the significance value of each

# open the data file and load the JSON
# NOTE(review): assumes USGS GeoJSON layout (data["features"], data["metadata"]) — confirm
with open("../../30DayQuakes.json", "r") as datafile:
    data = json.load(datafile)



def quake(q):
    """Return True when the GeoJSON feature describes an earthquake event."""
    return q["properties"]["type"] == "earthquake"


def get_felt(record):
    """Key function: felt-report count of a feature as a float; None counts as 0."""
    felt = record["properties"]["felt"]
    return 0.0 if felt is None else float(felt)


def felt_filter(q):
    """Keep only events whose felt-report count is known and at least 100."""
    reports = q['properties']['felt']
    return reports is not None and reports >= 100


def get_sig(q):
    """Key function: significance value of an event as a float; None counts as 0."""
    sig = q['properties']['sig']
    if sig is None:
        return 0.0
    return float(sig)


def get_most_significant(events):
    """Return the single most significant feature in events["features"].

    Uses max() instead of sorting the whole list in place: O(n) instead of
    O(n log n), and — unlike list.sort() — it does not mutate the caller's
    data as a side effect. max() returns the first maximal element, which is
    the same element a stable reverse sort would have placed at index 0.
    """
    return max(events["features"], key=get_sig)


# 1: How many quakes are there in total?
quakes = list(filter(quake, data['features']))
print(f"Total number of quakes: {len(quakes)}")
print(f"Total number of events: {data['metadata']['count']}")

# 2: How many quakes were felt by at least 100 people?
popular = list(filter(felt_filter, quakes))
print(f"Number of quakes felt by at least 100 people: {len(popular)}")

# 3: Print the name of the place whose quake was felt by the most people, with the # of reports
# NOTE(review): this scans ALL features, not the quake-filtered list above — confirm intended
most_felt = max(data["features"], key=get_felt)
print(
    f"Most felt quake at {most_felt['properties']['title']}, reports: {most_felt['properties']['felt']}")


# 4: Print the top 10 most significant events, with the significance value of each
# sorted() returns a new list, leaving data["features"] untouched
sig_events = sorted(data["features"], key=get_sig, reverse=True)
for i in range(0, 10):
    print(
        f"Event: {sig_events[i]['properties']['title']}, Significance: {sig_events[i]['properties']['sig']}")
20 changes: 18 additions & 2 deletions Start/Ch_1/filtering.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,10 +23,26 @@ def filterUppers(x):
chars = "abcDeFGHiJklmnoP"

# use filter to remove items from a list
# NOTE(review): filterEvens/filterUppers are defined above this hunk; the result
# names suggest they KEEP odds/lowercase — confirm against their bodies
odds = list(filter(filterEvens, nums))
lowers = list(filter(filterUppers, chars))
#print(odds)
#print(lowers)

# use filter on a non-numeric sequence (see `lowers` above)

# Use the filter on our data - let's filter out all seismic events that were *not* quakes
# open the data file and load the JSON
with open("../../30DayQuakes.json", "r") as datafile:
    data = json.load(datafile)


def notAQuake(q):
    """Return True for seismic events that are NOT earthquakes."""
    return q["properties"]["type"] != "earthquake"


# list the non-quake events and show the first ten types
# NOTE(review): raises IndexError if fewer than 10 non-quake events — confirm acceptable
events = list(filter(notAQuake, data["features"]))
print(f"Total non-quake events: {len(events)}")
for i in range(0, 10):
    print(events[i]["properties"]["type"])
26 changes: 21 additions & 5 deletions Start/Ch_1/minmax.py
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,30 @@


# The min() function finds the minimum value
# print(f"The minimum value is: {min(values)}")
# print(f"The minimum value is: {min(strings)}")

# The max() function finds the maximum value
# print(f"The maximum value is: {max(values)}")
# print(f"The maximum value is: {max(strings)}")

# define a custom "key" function to extract a data field
# print(f"The minimum value is: {min(strings, key=len)}")
# print(f"The maximum value is: {max(strings, key=len)}")

# open the data file and load the JSON
with open("../../30DayQuakes.json", "r") as datafile:
    data = json.load(datafile)
print(data["metadata"]["title"])
print(len(data["features"]))


def getmag(dataitem):
    """Key function: magnitude of a feature as a float; a missing (None) magnitude counts as 0."""
    mag = dataitem["properties"]["mag"]
    if mag is None:
        return 0.0
    return float(mag)


# print the smallest and largest quake (whole feature dicts) by magnitude
print(min(data["features"], key=getmag))
print(max(data["features"], key=getmag))
28 changes: 19 additions & 9 deletions Start/Ch_1/sorting.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,21 +8,31 @@
names = ["Jeff", "Bill", "Addie", "Stephanie", "Zach", "Lukas", "Joe", "Stacy"]

# the sorted() function can be used to return a new list with sorted data
# result1 = sorted(numbers)
# print(numbers)
# print(result1)

# alternately, you can use the list object's sort() method, which sorts the list in-place
# print(names)
# names.sort(reverse=True)
# print(names)

# To sort custom objects, we can tell the sort function which property to use
# by specifying a key function

# open the data file and load the JSON
with open("../../30DayQuakes.json", "r") as datafile:
    data = json.load(datafile)


def getmag(dataitem):
    """Key function: magnitude of a feature as a float; a missing (None) magnitude counts as 0."""
    mag = dataitem["properties"]["mag"]
    return 0.0 if mag is None else float(mag)


# Sort on magnitude value, descending order (in-place; getmag maps None to 0).
# Removed a commented-out duplicate of getmag that shadowed the live definition above.
data["features"].sort(key=getmag, reverse=True)

# Show the ten strongest events; slicing (unlike indexing 0..9) also
# works when fewer than 10 features are present.
for feature in data["features"][:10]:
    print(feature["properties"]["place"])
33 changes: 27 additions & 6 deletions Start/Ch_1/transform.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

import json
import pprint
import datetime


def squareFunc(x):
Expand All @@ -26,20 +27,40 @@ def toGrade(x):
grades = (81, 89, 94, 78, 61, 66, 99, 74)

# use map to create a new sequence of values
squares = list(map(squareFunc, nums))
# print(nums)
# print(squares)

# use sorted and map to change numbers to grades
# (sorted() returns a list, so `grades` is rebound from tuple to list here)
grades = sorted(grades)
letters = list(map(toGrade, grades))
print(grades)
print(letters)

# open the data file and load the JSON
# (next we filter it down to the largest-magnitude events)

with open("../../30DayQuakes.json", "r") as datafile:
    data = json.load(datafile)

# filter the data down to the largest events
# def bigmag(q):
# return q['properties']['mag'] is not None and q['properties']['mag'] >= 6


# results = list(filter(bigmag, data['features']))
def bigmag(q):
    """Keep only events with a known magnitude of at least 6."""
    mag = q['properties']['mag']
    return mag is not None and mag >= 6


# filter the full feature list down to the largest (mag >= 6) events
results = list(filter(bigmag, data["features"]))

# TODO: transform the largest events into a simpler structure


def simplify(q):
    """Reduce a GeoJSON feature to place, magnitude, and calendar date."""
    props = q["properties"]
    # event time is epoch milliseconds; fromtimestamp() wants seconds
    # (date is computed in the machine's local timezone)
    event_day = datetime.date.fromtimestamp(props["time"] / 1000)
    return {
        "place": props["place"],
        "magnitude": props["mag"],
        "date": str(event_day),
    }


# replace each big event with its simplified summary and pretty-print them
results = list(map(simplify, results))
pprint.pp(results)
18 changes: 11 additions & 7 deletions Start/Ch_1/utility.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,25 +5,29 @@
values = [0, 1, 2, 3, 4, 5]

# any() can be used to see if any value in a sequence is True
# print(any(values))

# all() will detect if all of the values in a sequence are True
# print(all(values))

# sum() can be used to add all of the values in a sequence
# print(sum(values))

# these utility functions don't have callbacks like min or max,
# but we can use a generator for more fine control

# open the data file and load the JSON
with open("../../30DayQuakes.json", "r") as datafile:
    data = json.load(datafile)

# are there any quake reports that were felt by more than 25,000 people?
print(any(quake["properties"]["felt"] is not None and quake["properties"]["felt"] > 25000
for quake in data["features"]))

# how many quakes were felt by more than 500 people?
# (True counts as 1, so sum() over a boolean generator counts matches)
print(sum(quake["properties"]["felt"] is not None and quake["properties"]["felt"] > 500
for quake in data["features"]))

# how many quakes had a magnitude of 6 or larger?
print(sum(quake["properties"]["mag"] is not None and quake["properties"]["mag"] >= 6.0
for quake in data["features"]))
11 changes: 10 additions & 1 deletion Start/Ch_2/challenge_start.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,8 +2,17 @@
# Programming challenge: use advanced data collections on the earthquake data

import json

from collections import defaultdict

# open the data file and load the JSON
with open("../../30DayQuakes.json", "r") as datafile:
    data = json.load(datafile)

# Print type of event and the number of events for that type.
# defaultdict(int) starts every unseen key at 0, so += needs no existence check.
totals = defaultdict(int)
for event in data['features']:
    totals[event['properties']['type']] +=1


# f-string {k:15} left-pads/aligns the type name to a 15-character column
for k,v in totals.items():
    print(f"{k:15}: {v}")
10 changes: 10 additions & 0 deletions Start/Ch_2/counter.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,15 +12,25 @@
"Gabby", "Kelly", "James", "Joe", "Sam", "Tara", "Ziggy"]

# Create a Counter for class1 and class2
c1 = Counter(class1)
c2 = Counter(class2)

# How many students in class 1 named James?
print(c1["James"])

# How many students are in class 1?
print(sum(c1.values()), " students in class 1")

# Combine the two classes (update() adds class2's counts into c1)
c1.update(class2)
# NOTE(review): message still says "class 1" but c1 now holds both classes
print(sum(c1.values()), " students in class 1")

# What's the most common name in the two classes?
print(c1.most_common(3))

# Separate the classes again (subtract() undoes the update above)
c1.subtract(class2)
print(c1.most_common(3))

# What's common between the two classes? (& keeps the minimum of each count)
print(c1 & c2)
3 changes: 2 additions & 1 deletion Start/Ch_2/defaultdict.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,10 +8,11 @@
'apple', 'grape', 'banana', 'banana']

# use a dictionary-like container to count each element
# (removed `fruitCounter = dict()` — it was dead code, immediately
# overwritten by the defaultdict binding below)
fruitCounter = defaultdict(int)

# Count the elements in the list; defaultdict(int) yields 0 for unseen keys
for fruit in fruits:
    fruitCounter[fruit] += 1

# print the result
print(fruitCounter)
15 changes: 14 additions & 1 deletion Start/Ch_2/deque.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,12 +4,25 @@
import string


# initialize a deque with the lowercase ASCII letters
d = collections.deque(string.ascii_lowercase)

# deques support the len() function
print(f"Item count: {len(d)}")

# deques can be iterated over
# for elem in d:
# print(elem, elem.upper())

# manipulate items from either end: pop()/append() work on the right,
# popleft()/appendleft() on the left
d.pop()
d.popleft()
d.append(2)
d.appendleft(1)


# rotate the deque one step to the right, then use an index to get a particular item
print(d)
d.rotate(1)
print(d)
print(d[5])
8 changes: 8 additions & 0 deletions Start/Ch_2/namedtuple.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,5 +4,13 @@


# create a Point namedtuple — a lightweight immutable record with named fields
Point = collections.namedtuple("Point", ["x", "y"])
p1 = Point(x=10, y=20)
p2 = Point(x=30, y=40)

print(p1, p2)
print(p1.x, p1.y)

# namedtuples are immutable, so _replace builds a NEW instance with x changed
p1 = p1._replace(x=100)
print(p1.x, p1.y)
Loading