Commit

Switch to GFW infra API from csv
jonaraphael committed Nov 25, 2024
1 parent f74dba6 commit 4395334
Showing 2 changed files with 8 additions and 13 deletions.
4 changes: 2 additions & 2 deletions cerulean_cloud/cloud_function_ais_analysis/utils/analyzer.py
@@ -124,10 +124,10 @@ def __init__(self, s1_scene, **kwargs):
 
         if self.infra_gdf is None:
             self.infra_api_token = os.getenv("INFRA_API_TOKEN")
-            self.infra_gdf = self.load_infrastructure_data()
+            self.infra_gdf = self.load_infrastructure_data_api()
         self.coincidence_scores = np.zeros(len(self.infra_gdf))
 
-    def load_infrastructure_data(self, only_oil=True):
+    def load_infrastructure_data_csv(self, only_oil=True):
         """
         Loads infrastructure data from a CSV file.
         """
17 changes: 6 additions & 11 deletions notebooks/ASA_test_environment.py
@@ -243,19 +243,13 @@ def plot_coincidence(
 s1_scene = get_s1_scene(slick_gdf.s1_scene_id.iloc[0])
 
 source_types = []
-# source_types += [1] # ais
+source_types += [1] # ais
 source_types += [2] # infra
 if not ( # If the last analyzer is for the same scene, reuse it
     analyzers
     and next(iter(analyzers.items()))[1].s1_scene.scene_id == s1_scene.scene_id
 ):
-    analyzers = {
-        s_type: ASA_MAPPING[s_type](
-            s1_scene,
-            gfw_infra_filepath="/Users/jonathanraphael/git/cerulean-cloud/cerulean_cloud/cloud_function_ais_analysis/SAR Fixed Infrastructure 202407 DENOISED UNIQUE.csv",
-        )
-        for s_type in source_types
-    }
+    analyzers = {s_type: ASA_MAPPING[s_type](s1_scene) for s_type in source_types}
 
 ranked_sources = pd.DataFrame(columns=["type", "st_name", "collated_score"])
 for s_type, analyzer in analyzers.items():
@@ -275,12 +269,13 @@
         ]
     )
 
-if 2 in analyzers:
+if 2 in analyzers.keys():
     plot_coincidence(analyzers[2], slick_id)
 
-print(ranked_sources[["type", "st_name", "collated_score"]].head())
+print(
+    ranked_sources[["type", "ext_id", "coincidence_score", "collated_score"]].head()
+)
 
-print(ranked_sources.head())
 # print(accumulated_sources)
 # %%
 fake_infra_gdf = generate_infrastructure_points(slick_gdf, 50000)
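
As a usage note, the notebook now builds each analyzer from the Sentinel-1 scene alone; the hard-coded local GFW CSV path is gone and credentials flow through the environment instead. A short illustrative sketch, reusing the notebook's existing ASA_MAPPING and s1_scene names:

# Illustrative only; mirrors the notebook's new construction pattern.
import os

os.environ.setdefault("INFRA_API_TOKEN", "<your-gfw-token>")  # read via os.getenv() inside the analyzer

source_types = [1, 2]  # 1 = AIS, 2 = infrastructure
analyzers = {s_type: ASA_MAPPING[s_type](s1_scene) for s_type in source_types}
# No gfw_infra_filepath kwarg anymore: the infrastructure analyzer now loads its
# GeoDataFrame via load_infrastructure_data_api() rather than from a local CSV.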
