Commit

Merge pull request #129 from agimus-project/128-numpyint-deprecated-other-type-aliases-too

128 numpyint deprecated other type aliases too
nim65s authored Feb 14, 2024
2 parents f5f1faf + 7dcf6d5 commit 6bbf7e2
Showing 16 changed files with 32 additions and 34 deletions.
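
Nearly all of the edits below follow the same pattern: NumPy 1.20 deprecated the scalar aliases np.int, np.float, np.bool and np.object (they were removed outright in NumPy 1.24), and since they were only aliases for the Python builtins, the fix is a direct substitution. A minimal illustration of the pattern, with made-up variable names:

```python
import numpy as np

# On NumPy >= 1.24, the old aliases raise AttributeError:
#   np.empty(0, dtype=np.int)   # AttributeError: module 'numpy' has no attribute 'int'
# The aliases were identical to the Python builtins, so the replacement is direct:
view_ids = np.empty(0, dtype=int)    # was dtype=np.int
scores = np.empty(0, dtype=float)    # was dtype=np.float
labels = np.empty(0, dtype=object)   # was dtype=np.object
# When a specific width matters, prefer the sized types, e.g. np.int64 or np.float32.
```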
@@ -63,10 +63,10 @@ def __init__(
     @staticmethod
     def make_empty_predictions():
         infos = {
-            "view_id": np.empty(0, dtype=np.int),
-            "scene_id": np.empty(0, dtype=np.int),
+            "view_id": np.empty(0, dtype=int),
+            "scene_id": np.empty(0, dtype=int),
             "label": np.empty(0, dtype=np.object),
-            "score": np.empty(0, dtype=np.float),
+            "score": np.empty(0, dtype=float),
         }
         bboxes = torch.empty(0, 4, dtype=torch.float)
         return tc.PandasTensorCollection(infos=pd.DataFrame(infos), bboxes=bboxes)

@@ -63,10 +63,10 @@ def __init__(
     @staticmethod
     def make_empty_predictions():
         infos = {
-            "view_id": np.empty(0, dtype=np.int),
-            "scene_id": np.empty(0, dtype=np.int),
+            "view_id": np.empty(0, dtype=int),
+            "scene_id": np.empty(0, dtype=int),
             "label": np.empty(0, dtype=np.object),
-            "score": np.empty(0, dtype=np.float),
+            "score": np.empty(0, dtype=float),
         }
         poses = torch.empty(0, 4, 4, dtype=torch.float)
         return tc.PandasTensorCollection(infos=pd.DataFrame(infos), poses=poses)

@@ -223,7 +223,7 @@ def compute_ap(label_df, label_n_gt):
         label_df = label_df.sort_values("score", ascending=False).reset_index(
             drop=True,
         )
-        label_df["n_tp"] = np.cumsum(label_df[valid_k].values.astype(np.float))
+        label_df["n_tp"] = np.cumsum(label_df[valid_k].values.astype(float))
         label_df["prec"] = label_df["n_tp"] / (np.arange(len(label_df)) + 1)
         label_df["recall"] = label_df["n_tp"] / label_n_gt
         y_true = label_df[valid_k]

@@ -361,7 +361,7 @@ def compute_ap(label_df, label_n_gt):
         label_df = label_df.sort_values("score", ascending=False).reset_index(
             drop=True,
         )
-        label_df["n_tp"] = np.cumsum(label_df[valid_k].values.astype(np.float))
+        label_df["n_tp"] = np.cumsum(label_df[valid_k].values.astype(float))
         label_df["prec"] = label_df["n_tp"] / (np.arange(len(label_df)) + 1)
         label_df["recall"] = label_df["n_tp"] / label_n_gt
         y_true = label_df[valid_k]

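The two compute_ap hunks above only swap np.float for float in the cumulative true-positive count. A toy, self-contained version of that precision/recall bookkeeping (illustrative data, with the valid_k column name replaced by a literal):

```python
import numpy as np
import pandas as pd

label_df = pd.DataFrame({"score": [0.9, 0.8, 0.6, 0.4],
                         "valid": [True, False, True, True]})
label_n_gt = 5  # number of ground-truth instances for this label

label_df = label_df.sort_values("score", ascending=False).reset_index(drop=True)
label_df["n_tp"] = np.cumsum(label_df["valid"].values.astype(float))
label_df["prec"] = label_df["n_tp"] / (np.arange(len(label_df)) + 1)
label_df["recall"] = label_df["n_tp"] / label_n_gt
print(label_df[["prec", "recall"]])
```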
@@ -25,7 +25,7 @@ def add_inst_num(
     group_keys=["scene_id", "view_id", "label"],
     key="pred_inst_num",
 ):
-    inst_num = np.empty(len(infos), dtype=np.int)
+    inst_num = np.empty(len(infos), dtype=int)
     for _group_name, group_ids in infos.groupby(group_keys).groups.items():
         inst_num[group_ids.values] = np.arange(len(group_ids))
     infos[key] = inst_num

@@ -137,7 +137,7 @@ def get_predictions(
             keep_ids = np.concatenate(keep_ids)
             batch_im_ids = np.concatenate(batch_im_ids)
             detections_ = detections[keep_ids]
-            detections_.infos["batch_im_id"] = np.array(batch_im_ids).astype(np.int)
+            detections_.infos["batch_im_id"] = np.array(batch_im_ids).astype(int)
         else:
             msg = "No detections"
             raise ValueError(msg)

@@ -28,7 +28,7 @@ def get_normal(
     mask[depth_refine == 0] = 1
     depth_refine = depth_refine.astype(np.float32)
     depth_refine = cv2.inpaint(depth_refine, mask, 2, cv2.INPAINT_NS)
-    depth_refine = depth_refine.astype(np.float)
+    depth_refine = depth_refine.astype(float)
     depth_refine = ndimage.gaussian_filter(depth_refine, 2)

     uv_table = np.zeros((res_y, res_x, 2), dtype=np.int16)

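For context on the get_normal hunk: cv2.inpaint only accepts 8-bit or 32-bit float single-channel images, which is why the depth map goes through np.float32 before inpainting and is cast back to a plain float (float64) array before smoothing. A rough, self-contained sketch of that hole-filling step with synthetic depth data:

```python
import cv2
import numpy as np
from scipy import ndimage

depth = np.random.uniform(0.5, 2.0, size=(480, 640))
depth[100:120, 200:220] = 0.0  # simulate missing measurements

mask = np.zeros(depth.shape, dtype=np.uint8)
mask[depth == 0] = 1

depth32 = depth.astype(np.float32)
depth32 = cv2.inpaint(depth32, mask, 2, cv2.INPAINT_NS)        # fill the holes
depth_smoothed = ndimage.gaussian_filter(depth32.astype(float), 2)
```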
@@ -17,12 +17,10 @@

 class MultiviewScenePredictor:
     def __init__(self, mesh_db, n_sym=64, ba_aabb=True, ba_n_points=None):
-        self.mesh_db_ransac = mesh_db.batched(n_sym=n_sym, aabb=True).cuda().float()
-        self.mesh_db_ba = (
-            mesh_db.batched(aabb=ba_aabb, resample_n_points=ba_n_points, n_sym=n_sym)
-            .cuda()
-            .float()
-        )
+        self.mesh_db_ransac = mesh_db.batched(n_sym=n_sym, aabb=True).float()
+        self.mesh_db_ba = mesh_db.batched(
+            aabb=ba_aabb, resample_n_points=ba_n_points, n_sym=n_sym
+        ).float()

     def reproject_scene(self, objects, cameras):
         TCO_data = []

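Beyond the dtype aliases, this hunk also stops pushing the batched mesh databases to the GPU inside __init__. A generic PyTorch sketch of the device-agnostic pattern this leaves room for (plain tensors here, not the actual mesh_db API):

```python
import torch

# Decide on a device once and move data explicitly when it is needed,
# instead of hard-coding .cuda() at construction time.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
points = torch.randn(64, 2000, 3).float()
points = points.to(device)
```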
@@ -102,15 +102,15 @@ def score_tmaches_batch(candidates, tmatches, TC1C2, mesh_db, bsz=4096):
 def scene_level_matching(candidates, inliers):
     cand1 = inliers["inlier_matches_cand1"]
     cand2 = inliers["inlier_matches_cand2"]
-    edges = np.ones((len(cand1)), dtype=np.int)
+    edges = np.ones((len(cand1)), dtype=int)
     n_cand = len(candidates)
     graph = csr_matrix((edges, (cand1, cand2)), shape=(n_cand, n_cand))
     n_components, ids = connected_components(graph, directed=True, connection="strong")

     component_size = defaultdict(lambda: 0)
     for idx in ids:
         component_size[idx] += 1
-    obj_n_cand = np.empty(len(ids), dtype=np.int)
+    obj_n_cand = np.empty(len(ids), dtype=int)
     for n, idx in enumerate(ids):
         obj_n_cand[n] = component_size[idx]

@@ -132,7 +132,7 @@ def scene_level_matching(candidates, inliers):
 def make_obj_infos(matched_candidates):
     scene_infos = matched_candidates.infos.loc[:, ["obj_id", "score", "label"]].copy()
     gb = scene_infos.groupby("obj_id")
-    scene_infos["n_cand"] = gb["score"].transform(len).astype(np.int)
+    scene_infos["n_cand"] = gb["score"].transform(len).astype(int)
     scene_infos["score"] = gb["score"].transform(np.sum)
     scene_infos = gb.first().reset_index(drop=False)
     return scene_infos

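The matching code above builds a sparse candidate graph whose edge data needs a concrete integer dtype, so the builtin int replaces np.int. A small standalone example of the strongly connected component grouping it relies on, with toy edges:

```python
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components

cand1 = np.array([0, 1, 3])   # inlier match endpoints (toy data)
cand2 = np.array([1, 0, 4])
edges = np.ones(len(cand1), dtype=int)  # was dtype=np.int

n_cand = 5
graph = csr_matrix((edges, (cand1, cand2)), shape=(n_cand, n_cand))
n_components, ids = connected_components(graph, directed=True, connection="strong")
print(n_components, ids)  # 0 and 1 share a component; 2, 3 and 4 are singletons
```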
@@ -151,7 +151,7 @@ def objects_pos_orn_rand_falling(self):

     def sample_camera(self):
         assert self.focal_interval.shape == (2, 2)
-        K = np.zeros((3, 3), dtype=np.float)
+        K = np.zeros((3, 3), dtype=float)
         fxfy = self.np_random.uniform(*self.focal_interval)
         W, H = max(self.resolution), min(self.resolution)
         K[0, 0] = fxfy[0]

@@ -24,7 +24,7 @@ def xr_merge(ds1, ds2, on, how="left", dim1="dim_0", dim2="dim_0", fill_value=np
     mask = np.isfinite(idx2)
     # assert mask.sum() == ds2.dims[dim1]
     idx1 = idx1[mask]
-    idx2 = idx2[mask].astype(np.int)
+    idx2 = idx2[mask].astype(int)

     for k, data_var in ds2.data_vars.items():
         array = data_var.values

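The xr_merge change is the same alias swap on the index array used for alignment. A minimal numpy-only sketch of that mask-then-cast pattern (the float index carries NaN for unmatched rows, so it must be masked before it can be used as an integer index):

```python
import numpy as np

idx1 = np.arange(4)
idx2 = np.array([0.0, np.nan, 2.0, 3.0])  # NaN marks rows with no match

mask = np.isfinite(idx2)
idx1 = idx1[mask]
idx2 = idx2[mask].astype(int)  # was .astype(np.int)

values = np.array([10.0, 20.0, 30.0, 40.0])
out = np.full(4, np.nan)
out[idx1] = values[idx2]
print(out)  # [10. nan 30. 40.]
```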
@@ -179,9 +179,9 @@ def mark_inliers(cand_inputs, cand_matched):
         on=["scene_id", "view_id", "label", "cand_id"],
         how="left",
     )
-    infos["is_inlier"] = infos["is_inlier"].astype(np.float)
-    infos.loc[~np.isfinite(infos.loc[:, "is_inlier"].astype(np.float)), "is_inlier"] = 0
-    infos["is_inlier"] = infos["is_inlier"].astype(np.bool)
+    infos["is_inlier"] = infos["is_inlier"].astype(float)
+    infos.loc[~np.isfinite(infos.loc[:, "is_inlier"].astype(float)), "is_inlier"] = 0
+    infos["is_inlier"] = infos["is_inlier"].astype(bool)
     cand_inputs.infos = infos
     return cand_inputs

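In mark_inliers, a left merge leaves NaN in is_inlier for candidates without a match, which is why the column is pushed through float, zeroed where non-finite, and only then cast to bool. A small pandas sketch of that sequence with the builtin dtypes (toy frames):

```python
import numpy as np
import pandas as pd

cand = pd.DataFrame({"cand_id": [0, 1, 2]})
matched = pd.DataFrame({"cand_id": [0, 2], "is_inlier": [True, True]})

infos = cand.merge(matched, on="cand_id", how="left")  # row 1 gets NaN
infos["is_inlier"] = infos["is_inlier"].astype(float)
infos.loc[~np.isfinite(infos["is_inlier"]), "is_inlier"] = 0
infos["is_inlier"] = infos["is_inlier"].astype(bool)
print(infos["is_inlier"].tolist())  # [True, False, True]
```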
@@ -74,10 +74,10 @@ def __init__(
     @staticmethod
     def make_empty_predictions():
         infos = {
-            "view_id": np.empty(0, dtype=np.int),
-            "scene_id": np.empty(0, dtype=np.int),
+            "view_id": np.empty(0, dtype=int),
+            "scene_id": np.empty(0, dtype=int),
             "label": np.empty(0, dtype=np.object),
-            "score": np.empty(0, dtype=np.float),
+            "score": np.empty(0, dtype=float),
         }
         poses = torch.empty(0, 4, 4, dtype=torch.float)
         return tc.PandasTensorCollection(infos=pd.DataFrame(infos), poses=poses)

happypose/pose_estimators/megapose/inference/types.py (2 additions, 2 deletions)

@@ -172,8 +172,8 @@ def from_numpy(
         Args:
         ----
             rgb: [H,W,3] np.uint8
-            depth: [H,W] np.float
-            K: [3,3] np.float
+            depth: [H,W] float
+            K: [3,3] float
         """
         assert rgb.dtype == np.uint8

happypose/toolbox/inference/types.py (2 additions, 2 deletions)

@@ -172,8 +172,8 @@ def from_numpy(
         Args:
         ----
             rgb: [H,W,3] np.uint8
-            depth: [H,W] np.float
-            K: [3,3] np.float
+            depth: [H,W] float
+            K: [3,3] float
         """
         assert rgb.dtype == np.uint8

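The two types.py hunks only touch the docstring: the documented depth and K inputs are ordinary float arrays, not np.float ones. For illustration, inputs matching those documented shapes and dtypes might look like this (the commented-out call site is hypothetical and not part of the diff):

```python
import numpy as np

rgb = np.zeros((480, 640, 3), dtype=np.uint8)  # [H, W, 3] uint8
depth = np.ones((480, 640), dtype=float)       # [H, W] float
K = np.array([[600.0, 0.0, 320.0],
              [0.0, 600.0, 240.0],
              [0.0, 0.0, 1.0]])                # [3, 3] float camera intrinsics
# observation = SomeObservationType.from_numpy(rgb, depth, K)  # hypothetical call
```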
happypose/toolbox/visualization/meshcat_utils.py (2 additions, 2 deletions)

@@ -88,7 +88,7 @@ def visualize_mesh(vis, mesh, transform=None, color=None, texture_png=None):
         color_hex = rgb2hex(tuple(color))
         material = meshcat.geometry.MeshPhongMaterial(color=color_hex)
     else:  # color is np.ndarray, e.g. [1,0,0]
-        if not np.issubdtype(color.dtype, np.int):
+        if not np.issubdtype(color.dtype, int):
             color = (color * 255).astype(np.int32)
         color_hex = rgb2hex(tuple(color))
         material = meshcat.geometry.MeshPhongMaterial(color=color_hex)

@@ -112,7 +112,7 @@ def visualize_scene(vis, object_dict, randomize_color=True):
             color = data["color"]

             # if it's not an integer, convert it to [0,255]
-            if not np.issubdtype(color.dtype, np.int):
+            if not np.issubdtype(color.dtype, int):
                 color = (color * 255).astype(np.int32)
         else:
             color = np.random.randint(low=0, high=256, size=3)

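One caveat worth noting about the two meshcat_utils hunks: on NumPy >= 1.20, np.issubdtype(dtype, int) compares against the platform default signed integer (np.int_), so unsigned or narrower integer dtypes such as uint8 do not pass the check, whereas np.issubdtype(dtype, np.integer) is the broader test. The commit keeps the original behaviour; the sketch below only illustrates the difference:

```python
import numpy as np

color_u8 = np.array([255, 0, 0], dtype=np.uint8)
float_color = np.array([1.0, 0.0, 0.0])

print(np.issubdtype(color_u8.dtype, int))         # False: uint8 is not np.int_
print(np.issubdtype(color_u8.dtype, np.integer))  # True: matches any integer dtype
print(np.issubdtype(float_color.dtype, int))      # False: triggers the *255 rescale
```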
