diff --git a/.gitignore b/.gitignore
index 50b6903..444ffd9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,4 +14,5 @@
 flagged/
 datasets/
 runs/
-__pycache__/
\ No newline at end of file
+__pycache__/
+horus_prj-*/
\ No newline at end of file
diff --git a/app-x.py b/app-x.py
new file mode 100644
index 0000000..9db8362
--- /dev/null
+++ b/app-x.py
@@ -0,0 +1,23 @@
+import gradio as gr
+from horus import video_processing
+
+
+with gr.Blocks() as main_ui:
+    with gr.Tab("Upload Video to Database"):
+        with gr.Row():
+            with gr.Column():
+                input_videos = gr.File(label="Upload Video", file_count="multiple", file_types=[".mp4", ".mov", ".mpg"])
+                input_project_name = gr.Text(label="Project Name")
+
+                upload_button = gr.Button("Start Upload")
+            with gr.Column():
+                output_status = gr.Text(label="Status")
+
+        upload_button.click(
+            video_processing.video_processing_ui,
+            inputs=[input_videos, input_project_name],
+            outputs=[output_status])
+
+
+if __name__ == "__main__":
+    main_ui.queue().launch(server_name="0.0.0.0", server_port=7861)
diff --git a/app.py b/app.py
index 230c0f5..8ce9654 100644
--- a/app.py
+++ b/app.py
@@ -61,7 +61,7 @@ def update_target_frame(video_path, x, y, w, h):
     with gr.Tab("Video Inference"):
         with gr.Row():
             with gr.Column():
-                input_video = gr.File(label="Upload Video", file_count="single", file_types=[".mp4", ".mov", ".mpg"])
+                input_video = gr.File(label="Upload Video", file_count="single", file_types=[".mp4", ".mov", ".mpg", ".webm"])
                 output_image = gr.Image(type="numpy", label="result image")
                 input_x = gr.Slider(
                     minimum=0,
@@ -113,4 +113,4 @@ def update_target_frame(video_path, x, y, w, h):
 
 
 if __name__ == "__main__":
-    main_ui.queue().launch(server_name="0.0.0.0")
+    main_ui.queue().launch(server_name="0.0.0.0", server_port=7861)
diff --git a/horus/project_manager.py b/horus/project_manager.py
new file mode 100644
index 0000000..b6571fc
--- /dev/null
+++ b/horus/project_manager.py
@@ -0,0 +1,22 @@
+import uuid
+from datetime import datetime
+import yaml
+import os
+
+
+def make_project(project_name: str, project_host_dir="/workspace/horus_inference_server/projects"):
+    project_id = str(uuid.uuid1())[0:11].replace("-", "")
+    project_dir = os.path.join(project_host_dir, "horus_prj-" + project_id)
+    os.makedirs(project_dir, exist_ok=True)
+
+    project_description_file_path = os.path.join(project_dir, "horus.yaml")
+
+    project_data = {}
+    project_data["project_name"] = project_name
+    project_data["create_date"] = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+
+    with open(project_description_file_path, 'w') as f:
+        yaml.dump(project_data, f, default_flow_style=False, allow_unicode=True)
+
+    print(f"Project file created: {project_description_file_path}")
+    return project_dir
diff --git a/horus/util.py b/horus/util.py
index 3a5af37..c7ae6c4 100644
--- a/horus/util.py
+++ b/horus/util.py
@@ -1,6 +1,7 @@
 import cv2
 import os
 import uuid
+import re
 
 
 def get_image_from_video(video_path: str, frame_id: int):
@@ -47,3 +48,10 @@ def video_to_images(video_path: str):
     cap.release()
 
     return output_dir
+
+
+def natural_sort(file_list):
+    def alphanum_key(key):
+        filename = os.path.basename(key)
+        return [int(text) if text.isdigit() else text for text in re.split(r'(\d+)', filename)]
+    return sorted(file_list, key=alphanum_key)
diff --git a/horus/video_processing.py b/horus/video_processing.py
new file mode 100644
index 0000000..265173b
--- /dev/null
+++ b/horus/video_processing.py
@@ -0,0 +1,79 @@
+import os
+import subprocess
+import tempfile
+import cv2
+from pathlib import Path
+from horus import util
+from horus import project_manager
+
+
+def make_video_list_file(video_files: list[str]):
+    file_name = tempfile.NamedTemporaryFile(delete=False, suffix='.txt').name
+
+    with open(file_name, 'w') as f:  # ffmpeg concat-demuxer list: one "file '<path>'" entry per video
+        for video_file in video_files:
+            p = Path(video_file)
+            video_path = p.resolve()
+
+            f.write(f"file '{video_path}'\n")
+
+    return file_name
+
+
+def run_ffmpeg_concat_av1(input_list: str, output_file: str, preset="fast"):
+    command = [
+        "ffmpeg",
+        "-safe", "0",  # allow absolute paths in the concat list
+        "-f", "concat",
+        "-i", input_list,
+        "-c:v", "av1_nvenc",
+        "-preset", preset,
+        "-b:v", "500k",
+        output_file
+    ]
+
+    try:
+        result = subprocess.run(command, check=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        print("ffmpeg command completed successfully.")
+        print(result.stdout)
+    except subprocess.CalledProcessError as e:
+        print("An error occurred while running the ffmpeg command.")
+        print(e.stderr)
+
+
+def run_ffmpeg_timelaps_av1(input_file: str, output_file: str, max_time_sec: int):
+    cap = cv2.VideoCapture(input_file)
+    video_time_sec = cap.get(cv2.CAP_PROP_FRAME_COUNT) / cap.get(cv2.CAP_PROP_FPS)
+    cap.release()
+    scale = video_time_sec / max_time_sec
+
+    command = [
+        "ffmpeg",
+        "-i", input_file,
+        "-r", "30",
+        "-c:v", "av1_nvenc",
+        "-b:v", "500k",
+        "-filter:v", f"setpts={(1.0 / scale)}*PTS",  # compress playback so the output fits within max_time_sec
+        output_file
+    ]
+
+    try:
+        result = subprocess.run(command, check=True, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+        print("ffmpeg command completed successfully.")
+        print(result.stdout)
+    except subprocess.CalledProcessError as e:
+        print("An error occurred while running the ffmpeg command.")
+        print(e.stderr)
+
+
+def video_processing_ui(video_files: list[str], project_name: str):
+    project_dir = project_manager.make_project(project_name)
+
+    video_files = util.natural_sort(video_files)
+    video_list_path = make_video_list_file(video_files)
+    merge_video_path = os.path.join(project_dir, "all_video_merge.webm")
+    run_ffmpeg_concat_av1(video_list_path, merge_video_path)
+    timelaps_video_path = os.path.join(project_dir, "timelaps.webm")
+    run_ffmpeg_timelaps_av1(merge_video_path, timelaps_video_path, 15 * 60)
+
+    return video_files
diff --git a/projects/.empty b/projects/.empty
new file mode 100644
index 0000000..e69de29
diff --git a/run_dev_env.sh b/run_dev_env.sh
index 2606890..c6533db 100755
--- a/run_dev_env.sh
+++ b/run_dev_env.sh
@@ -31,8 +31,8 @@ PLATFORM="$(uname -m)"
 
 if [ $PLATFORM = "x86_64" ]; then
     echo "x86"
-    docker pull ghcr.io/moriyalab/horus_inference_server:latest
-    docker run -it --rm --gpus all --runtime nvidia -u $(id -u):$(id -g) --shm-size=32G -v $ROOT:/workspace/horus_inference_server -w /workspace/horus_inference_server --network host ghcr.io/moriyalab/horus_inference_server:latest
+    docker pull takanotaiga/horus_inference_server:latest
+    docker run -it --rm --gpus all,capabilities=video --runtime nvidia --shm-size=32G -u $(id -u):$(id -g) -v $ROOT:/workspace/horus_inference_server -w /workspace/horus_inference_server --network host takanotaiga/horus_inference_server:latest
 else
     echo "Not Support Platform. Only support x86."
 fi
\ No newline at end of file